2024-11-12 19:32:27,519 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-12 19:32:27,536 main DEBUG Took 0.013738 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-12 19:32:27,537 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-12 19:32:27,538 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-12 19:32:27,539 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-12 19:32:27,541 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 19:32:27,560 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-12 19:32:27,582 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 19:32:27,584 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 19:32:27,592 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 19:32:27,593 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 19:32:27,594 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 19:32:27,594 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 19:32:27,596 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 19:32:27,596 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 19:32:27,597 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 19:32:27,598 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 19:32:27,600 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 19:32:27,600 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 19:32:27,601 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 19:32:27,602 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-12 19:32:27,603 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 19:32:27,603 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 19:32:27,604 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 19:32:27,605 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 19:32:27,605 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 19:32:27,606 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 19:32:27,608 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 19:32:27,608 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 19:32:27,609 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 19:32:27,610 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 19:32:27,610 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 19:32:27,611 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-12 19:32:27,613 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 19:32:27,615 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-12 19:32:27,618 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-12 19:32:27,618 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-12 19:32:27,620 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-12 19:32:27,621 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-12 19:32:27,634 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-12 19:32:27,639 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-12 19:32:27,641 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-12 19:32:27,642 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-12 19:32:27,643 main DEBUG createAppenders(={Console}) 2024-11-12 19:32:27,644 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-12 19:32:27,644 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-12 19:32:27,645 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-12 19:32:27,647 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-12 19:32:27,648 main DEBUG OutputStream closed 2024-11-12 19:32:27,648 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-12 19:32:27,648 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-12 19:32:27,649 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-12 19:32:27,800 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-12 19:32:27,803 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-12 19:32:27,805 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-12 19:32:27,807 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-12 19:32:27,808 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-12 19:32:27,809 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-12 19:32:27,809 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-12 19:32:27,810 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-12 19:32:27,811 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-12 19:32:27,812 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-12 19:32:27,813 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-12 19:32:27,814 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-12 19:32:27,814 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-12 19:32:27,815 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-12 19:32:27,816 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-12 19:32:27,816 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-12 19:32:27,817 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-12 19:32:27,818 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-12 19:32:27,823 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-12 19:32:27,825 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-12 19:32:27,831 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-12 19:32:27,842 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-12T19:32:28,233 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89 2024-11-12 19:32:28,238 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-12 19:32:28,239 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-12T19:32:28,252 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithBasicPolicy timeout: 13 mins 2024-11-12T19:32:28,294 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-12T19:32:28,301 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/cluster_4e8a161d-4924-73d8-2773-d81d718de17a, deleteOnExit=true 2024-11-12T19:32:28,305 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-12T19:32:28,307 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/test.cache.data in system properties and HBase conf 2024-11-12T19:32:28,308 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/hadoop.tmp.dir in system properties and HBase conf 2024-11-12T19:32:28,311 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/hadoop.log.dir in system properties and HBase conf 2024-11-12T19:32:28,312 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-12T19:32:28,313 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-12T19:32:28,313 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-12T19:32:28,531 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-12T19:32:28,677 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-12T19:32:28,688 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-12T19:32:28,689 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-12T19:32:28,690 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-12T19:32:28,691 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T19:32:28,692 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-12T19:32:28,693 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-12T19:32:28,694 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T19:32:28,695 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T19:32:28,696 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-12T19:32:28,696 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/nfs.dump.dir in system properties and HBase conf 2024-11-12T19:32:28,697 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/java.io.tmpdir in system properties and HBase conf 2024-11-12T19:32:28,697 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T19:32:28,698 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-12T19:32:28,699 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-12T19:32:30,444 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-12T19:32:30,572 INFO [Time-limited test {}] log.Log(170): Logging initialized @4507ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-12T19:32:30,688 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T19:32:30,802 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T19:32:30,870 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T19:32:30,870 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T19:32:30,872 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T19:32:30,892 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T19:32:30,897 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73882ca4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/hadoop.log.dir/,AVAILABLE} 2024-11-12T19:32:30,899 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@588be694{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T19:32:31,248 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f0d4558{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/java.io.tmpdir/jetty-localhost-46481-hadoop-hdfs-3_4_1-tests_jar-_-any-13989074922526103465/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T19:32:31,269 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a299586{HTTP/1.1, (http/1.1)}{localhost:46481} 2024-11-12T19:32:31,269 INFO [Time-limited test {}] server.Server(415): Started @5206ms 2024-11-12T19:32:32,279 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T19:32:32,302 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T19:32:32,308 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T19:32:32,308 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T19:32:32,309 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T19:32:32,328 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@200c8689{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/hadoop.log.dir/,AVAILABLE} 2024-11-12T19:32:32,329 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57582772{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T19:32:32,493 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76026208{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/java.io.tmpdir/jetty-localhost-39459-hadoop-hdfs-3_4_1-tests_jar-_-any-4398418284716165014/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T19:32:32,495 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7489a976{HTTP/1.1, (http/1.1)}{localhost:39459} 2024-11-12T19:32:32,496 INFO [Time-limited test {}] server.Server(415): Started @6433ms 2024-11-12T19:32:32,581 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T19:32:33,783 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/cluster_4e8a161d-4924-73d8-2773-d81d718de17a/dfs/data/data1/current/BP-1795121182-172.17.0.3-1731439949678/current, will proceed with Du for space computation calculation, 2024-11-12T19:32:33,783 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/cluster_4e8a161d-4924-73d8-2773-d81d718de17a/dfs/data/data2/current/BP-1795121182-172.17.0.3-1731439949678/current, will proceed with Du for space computation calculation, 2024-11-12T19:32:33,874 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-12T19:32:33,986 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc5e2273689091489 with lease ID 0xe7c165521f526285: Processing first storage report for DS-f1c7dfe2-aaf6-4cdb-b091-3e481db414d8 from datanode DatanodeRegistration(127.0.0.1:43501, datanodeUuid=52363b5f-b0a7-403b-89d2-318900fbb25e, infoPort=44513, infoSecurePort=0, ipcPort=40097, storageInfo=lv=-57;cid=testClusterID;nsid=1237295884;c=1731439949678) 2024-11-12T19:32:33,988 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc5e2273689091489 with lease ID 0xe7c165521f526285: from storage DS-f1c7dfe2-aaf6-4cdb-b091-3e481db414d8 node DatanodeRegistration(127.0.0.1:43501, datanodeUuid=52363b5f-b0a7-403b-89d2-318900fbb25e, infoPort=44513, infoSecurePort=0, ipcPort=40097, storageInfo=lv=-57;cid=testClusterID;nsid=1237295884;c=1731439949678), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-12T19:32:33,992 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc5e2273689091489 with lease ID 0xe7c165521f526285: Processing first storage report for DS-acc581ce-39b0-461f-85e7-c81dfc5946af from datanode DatanodeRegistration(127.0.0.1:43501, datanodeUuid=52363b5f-b0a7-403b-89d2-318900fbb25e, infoPort=44513, infoSecurePort=0, ipcPort=40097, storageInfo=lv=-57;cid=testClusterID;nsid=1237295884;c=1731439949678) 2024-11-12T19:32:33,993 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc5e2273689091489 with lease ID 0xe7c165521f526285: from storage DS-acc581ce-39b0-461f-85e7-c81dfc5946af node DatanodeRegistration(127.0.0.1:43501, datanodeUuid=52363b5f-b0a7-403b-89d2-318900fbb25e, infoPort=44513, infoSecurePort=0, ipcPort=40097, storageInfo=lv=-57;cid=testClusterID;nsid=1237295884;c=1731439949678), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-12T19:32:34,002 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89 2024-11-12T19:32:34,231 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/cluster_4e8a161d-4924-73d8-2773-d81d718de17a/zookeeper_0, clientPort=60358, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/cluster_4e8a161d-4924-73d8-2773-d81d718de17a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/cluster_4e8a161d-4924-73d8-2773-d81d718de17a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-12T19:32:34,254 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=60358 2024-11-12T19:32:34,275 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T19:32:34,280 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T19:32:34,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741825_1001 (size=7) 2024-11-12T19:32:34,810 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8 with version=8 2024-11-12T19:32:34,811 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/hbase-staging 2024-11-12T19:32:35,084 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-12T19:32:35,443 INFO [Time-limited test {}] client.ConnectionUtils(129): master/81d69e608036:0 server-side Connection retries=45 2024-11-12T19:32:35,468 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T19:32:35,469 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T19:32:35,469 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T19:32:35,469 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T19:32:35,470 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, 
handlerCount=1 2024-11-12T19:32:35,672 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T19:32:35,749 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-12T19:32:35,761 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-12T19:32:35,766 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T19:32:35,801 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 50446 (auto-detected) 2024-11-12T19:32:35,802 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-12T19:32:35,829 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.3:46265 2024-11-12T19:32:35,840 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T19:32:35,844 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T19:32:35,862 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:46265 connecting to ZooKeeper ensemble=127.0.0.1:60358 2024-11-12T19:32:35,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:462650x0, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T19:32:35,959 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46265-0x10131d2ab780000 connected 2024-11-12T19:32:36,356 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T19:32:36,360 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T19:32:36,365 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T19:32:36,372 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46265 2024-11-12T19:32:36,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46265 2024-11-12T19:32:36,375 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46265 2024-11-12T19:32:36,377 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46265 2024-11-12T19:32:36,377 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): 
Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46265 2024-11-12T19:32:36,388 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8, hbase.cluster.distributed=false 2024-11-12T19:32:36,496 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/81d69e608036:0 server-side Connection retries=45 2024-11-12T19:32:36,496 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T19:32:36,503 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T19:32:36,503 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T19:32:36,504 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T19:32:36,504 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T19:32:36,507 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T19:32:36,519 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T19:32:36,520 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.3:33067 2024-11-12T19:32:36,523 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T19:32:36,542 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T19:32:36,552 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T19:32:36,557 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T19:32:36,577 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:33067 connecting to ZooKeeper ensemble=127.0.0.1:60358 2024-11-12T19:32:36,645 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:330670x0, quorum=127.0.0.1:60358, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T19:32:36,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:330670x0, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T19:32:36,651 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33067-0x10131d2ab780001 connected 2024-11-12T19:32:36,658 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T19:32:36,671 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T19:32:36,672 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33067 2024-11-12T19:32:36,673 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33067 2024-11-12T19:32:36,683 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33067 2024-11-12T19:32:36,690 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33067 2024-11-12T19:32:36,695 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33067 2024-11-12T19:32:36,711 INFO [master/81d69e608036:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/81d69e608036,46265,1731439955074 2024-11-12T19:32:36,743 DEBUG [M:0;81d69e608036:46265 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;81d69e608036:46265 2024-11-12T19:32:36,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T19:32:36,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T19:32:36,758 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/81d69e608036,46265,1731439955074 2024-11-12T19:32:36,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T19:32:36,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T19:32:36,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T19:32:36,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T19:32:36,864 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Set watcher on existing 
znode=/hbase/master 2024-11-12T19:32:36,870 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-12T19:32:36,875 INFO [master/81d69e608036:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/81d69e608036,46265,1731439955074 from backup master directory 2024-11-12T19:32:36,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/81d69e608036,46265,1731439955074 2024-11-12T19:32:36,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T19:32:36,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T19:32:36,907 WARN [master/81d69e608036:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T19:32:36,907 INFO [master/81d69e608036:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=81d69e608036,46265,1731439955074 2024-11-12T19:32:36,912 INFO [master/81d69e608036:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-12T19:32:36,922 INFO [master/81d69e608036:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-12T19:32:37,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741826_1002 (size=42) 2024-11-12T19:32:37,527 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/hbase.id with ID: 5101d27e-c8c7-4fd8-94de-9bf1344a4d77 2024-11-12T19:32:37,600 INFO [master/81d69e608036:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T19:32:37,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T19:32:37,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T19:32:37,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741827_1003 (size=196) 2024-11-12T19:32:38,160 INFO [master/81d69e608036:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T19:32:38,164 INFO [master/81d69e608036:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-12T19:32:38,188 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] 
at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:38,196 INFO [master/81d69e608036:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T19:32:38,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741828_1004 (size=1189) 2024-11-12T19:32:38,305 INFO [master/81d69e608036:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store 2024-11-12T19:32:38,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741829_1005 (size=34) 2024-11-12T19:32:38,784 INFO [master/81d69e608036:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-12T19:32:38,785 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:32:38,787 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T19:32:38,787 INFO [master/81d69e608036:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T19:32:38,787 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T19:32:38,788 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T19:32:38,788 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T19:32:38,788 INFO [master/81d69e608036:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T19:32:38,789 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-12T19:32:38,794 WARN [master/81d69e608036:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store/.initializing 2024-11-12T19:32:38,794 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/WALs/81d69e608036,46265,1731439955074 2024-11-12T19:32:38,806 INFO [master/81d69e608036:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-12T19:32:38,825 INFO [master/81d69e608036:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=81d69e608036%2C46265%2C1731439955074, suffix=, logDir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/WALs/81d69e608036,46265,1731439955074, archiveDir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/oldWALs, maxLogs=10 2024-11-12T19:32:38,857 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(599): When create output stream for /user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/WALs/81d69e608036,46265,1731439955074/81d69e608036%2C46265%2C1731439955074.1731439958832, exclude list is [], retry=0 2024-11-12T19:32:38,881 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43501,DS-f1c7dfe2-aaf6-4cdb-b091-3e481db414d8,DISK] 2024-11-12T19:32:38,886 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-12T19:32:38,952 INFO [master/81d69e608036:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/WALs/81d69e608036,46265,1731439955074/81d69e608036%2C46265%2C1731439955074.1731439958832 2024-11-12T19:32:38,954 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44513:44513)] 2024-11-12T19:32:38,954 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-12T19:32:38,955 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:32:38,963 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T19:32:38,965 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T19:32:39,042 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T19:32:39,101 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-12T19:32:39,108 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:32:39,114 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T19:32:39,115 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T19:32:39,125 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-12T19:32:39,126 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:32:39,131 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:32:39,132 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T19:32:39,148 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-12T19:32:39,148 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:32:39,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:32:39,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T19:32:39,158 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-12T19:32:39,158 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:32:39,164 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:32:39,171 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T19:32:39,175 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T19:32:39,190 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T19:32:39,196 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T19:32:39,211 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T19:32:39,213 INFO [master/81d69e608036:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68009338, jitterRate=0.013418108224868774}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T19:32:39,221 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-12T19:32:39,229 INFO [master/81d69e608036:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-12T19:32:39,283 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d2a9cd8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:32:39,339 INFO [master/81d69e608036:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
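The FlushLargeStoresPolicy record above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set for master:store, so the lower bound falls back to the region memstore flush size divided by the number of column families. With the 128 MB flush size reported by MasterRegionFlusherAndCompactor (flushSize=134217728) and the four families opened above (info, proc, rs, state), that works out to the 32 MB (flushSizeLowerBound=33554432) shown in the log. A minimal arithmetic sketch of that fallback, with names chosen here purely for illustration:

```java
/**
 * Sketch of the fallback described in the FlushLargeStoresPolicy line above:
 * lowerBound = memstoreFlushSize / numberOfColumnFamilies.
 * The helper name and structure are illustrative, not HBase internals.
 */
public class FlushLowerBoundSketch {
  static long perFamilyLowerBound(long memstoreFlushSize, int familyCount) {
    return memstoreFlushSize / familyCount;
  }

  public static void main(String[] args) {
    long flushSize = 134_217_728L;        // 128 MB, from the log above
    int families = 4;                     // info, proc, rs, state
    long lowerBound = perFamilyLowerBound(flushSize, families);
    System.out.println(lowerBound);       // 33554432 (32 MB), matching the log
  }
}
```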
2024-11-12T19:32:39,357 INFO [master/81d69e608036:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-12T19:32:39,357 INFO [master/81d69e608036:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-12T19:32:39,360 INFO [master/81d69e608036:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-12T19:32:39,367 INFO [master/81d69e608036:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 7 msec 2024-11-12T19:32:39,377 INFO [master/81d69e608036:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 9 msec 2024-11-12T19:32:39,378 INFO [master/81d69e608036:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-12T19:32:39,447 INFO [master/81d69e608036:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-12T19:32:39,483 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-12T19:32:39,543 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-12T19:32:39,546 INFO [master/81d69e608036:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-12T19:32:39,559 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-12T19:32:39,595 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-12T19:32:39,599 INFO [master/81d69e608036:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-12T19:32:39,611 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-12T19:32:39,659 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-12T19:32:39,671 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-12T19:32:39,687 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-12T19:32:39,707 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-12T19:32:39,711 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-12T19:32:39,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T19:32:39,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T19:32:39,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T19:32:39,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T19:32:39,779 INFO [master/81d69e608036:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=81d69e608036,46265,1731439955074, sessionid=0x10131d2ab780000, setting cluster-up flag (Was=false) 2024-11-12T19:32:39,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T19:32:39,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T19:32:40,020 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-12T19:32:40,022 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=81d69e608036,46265,1731439955074 2024-11-12T19:32:40,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T19:32:40,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T19:32:40,218 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-12T19:32:40,256 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=81d69e608036,46265,1731439955074 2024-11-12T19:32:40,349 DEBUG [RS:0;81d69e608036:33067 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;81d69e608036:33067 2024-11-12T19:32:40,391 INFO 
[RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(1008): ClusterId : 5101d27e-c8c7-4fd8-94de-9bf1344a4d77 2024-11-12T19:32:40,407 DEBUG [RS:0;81d69e608036:33067 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T19:32:40,409 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-11-12T19:32:40,418 INFO [master/81d69e608036:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-12T19:32:40,425 INFO [master/81d69e608036:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-12T19:32:40,434 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 81d69e608036,46265,1731439955074 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-12T19:32:40,445 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/81d69e608036:0, corePoolSize=5, maxPoolSize=5 2024-11-12T19:32:40,445 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/81d69e608036:0, corePoolSize=5, maxPoolSize=5 2024-11-12T19:32:40,445 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/81d69e608036:0, corePoolSize=5, maxPoolSize=5 2024-11-12T19:32:40,446 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/81d69e608036:0, corePoolSize=5, maxPoolSize=5 2024-11-12T19:32:40,446 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/81d69e608036:0, corePoolSize=10, maxPoolSize=10 2024-11-12T19:32:40,446 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/81d69e608036:0, corePoolSize=1, maxPoolSize=1 2024-11-12T19:32:40,447 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/81d69e608036:0, corePoolSize=2, maxPoolSize=2 2024-11-12T19:32:40,448 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/81d69e608036:0, corePoolSize=1, maxPoolSize=1 2024-11-12T19:32:40,452 DEBUG [RS:0;81d69e608036:33067 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T19:32:40,452 DEBUG [RS:0;81d69e608036:33067 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T19:32:40,473 INFO [master/81d69e608036:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731439990473 2024-11-12T19:32:40,476 INFO [master/81d69e608036:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-12T19:32:40,477 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-12T19:32:40,477 INFO [master/81d69e608036:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-12T19:32:40,477 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-12T19:32:40,479 DEBUG [RS:0;81d69e608036:33067 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T19:32:40,480 DEBUG [RS:0;81d69e608036:33067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26e8020d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:32:40,482 INFO [master/81d69e608036:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-12T19:32:40,483 INFO [master/81d69e608036:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-12T19:32:40,483 INFO [master/81d69e608036:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-12T19:32:40,484 INFO [master/81d69e608036:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-12T19:32:40,487 DEBUG [RS:0;81d69e608036:33067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c491834, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=81d69e608036/172.17.0.3:0 2024-11-12T19:32:40,492 INFO [RS:0;81d69e608036:33067 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-12T19:32:40,492 INFO [RS:0;81d69e608036:33067 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-12T19:32:40,492 DEBUG [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(1090): About to register with Master. 
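Several records above show the master probing optional znodes (/hbase/balancer, /hbase/normalizer, /hbase/switch/split, /hbase/switch/merge, /hbase/snapshot-cleanup) where a missing node is "not necessarily an error", and both master and region server then receiving NodeCreated / NodeChildrenChanged events under /hbase. The sketch below shows the generic ZooKeeper pattern at work there — check whether a znode exists, leave a watch, react to the event — using the plain ZooKeeper client API. The connect string and path are copied from the log; everything else is illustrative and not HBase code.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

/**
 * Illustrative sketch of the "check znode, watch for creation" pattern visible
 * in the ZKUtil/ZKWatcher lines above. Plain ZooKeeper client, not HBase code.
 */
public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60358", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();

    String path = "/hbase/balancer";
    // exists() with a watcher registers a one-shot watch that fires on
    // NodeCreated/NodeDeleted/NodeDataChanged for this path.
    Stat stat = zk.exists(path, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated) {
        System.out.println("znode created: " + event.getPath());
      }
    });
    // A missing node is not an error here, exactly as the log points out.
    System.out.println(path + (stat == null ? " does not exist (yet)" : " exists"));
    zk.close();
  }
}
```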
2024-11-12T19:32:40,493 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:32:40,494 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-12T19:32:40,498 INFO [master/81d69e608036:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T19:32:40,504 INFO [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(3073): reportForDuty to master=81d69e608036,46265,1731439955074 with isa=81d69e608036/172.17.0.3:33067, startcode=1731439956493 2024-11-12T19:32:40,504 INFO [master/81d69e608036:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-12T19:32:40,506 INFO [master/81d69e608036:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-12T19:32:40,507 INFO [master/81d69e608036:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-12T19:32:40,512 INFO [master/81d69e608036:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-12T19:32:40,512 INFO [master/81d69e608036:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-12T19:32:40,537 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/81d69e608036:0:becomeActiveMaster-HFileCleaner.large.0-1731439960526,5,FailOnTimeoutGroup] 2024-11-12T19:32:40,538 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/81d69e608036:0:becomeActiveMaster-HFileCleaner.small.0-1731439960537,5,FailOnTimeoutGroup] 2024-11-12T19:32:40,539 INFO [master/81d69e608036:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
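The FSTableDescriptors record above prints the full hbase:meta descriptor, including the MultiRowMutationEndpoint coprocessor and the 'info' family's ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory flag and 8 KB block size. For comparison, here is a hedged sketch of how a descriptor with those 'info'-family attributes could be declared through the public HBase 2.x client API. The real meta descriptor is produced internally by FSTableDescriptors; this snippet uses a made-up user table name and is purely illustrative.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Sketch: a user-table descriptor whose 'info' family mirrors the attributes
 * printed for hbase:meta above (ROW_INDEX_V1, ROWCOL bloom, in-memory, 8 KB
 * blocks, 3 versions). Illustrative only; not how meta itself is created.
 */
public class DescriptorSketch {
  public static TableDescriptor build() throws IOException {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setMaxVersions(3)
        .build();

    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo_meta_like"))   // hypothetical table name
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .setColumnFamily(info)
        .build();
  }
}
```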
2024-11-12T19:32:40,539 INFO [master/81d69e608036:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-12T19:32:40,541 INFO [master/81d69e608036:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-12T19:32:40,541 INFO [master/81d69e608036:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-12T19:32:40,542 DEBUG [RS:0;81d69e608036:33067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T19:32:40,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741831_1007 (size=1039) 2024-11-12T19:32:40,581 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-12T19:32:40,582 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8 2024-11-12T19:32:40,632 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59131, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T19:32:40,645 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46265 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 81d69e608036,33067,1731439956493 2024-11-12T19:32:40,652 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46265 {}] master.ServerManager(486): Registering regionserver=81d69e608036,33067,1731439956493 2024-11-12T19:32:40,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741832_1008 (size=32) 2024-11-12T19:32:40,671 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:32:40,683 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T19:32:40,692 DEBUG [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8 2024-11-12T19:32:40,693 DEBUG [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:41367 2024-11-12T19:32:40,693 DEBUG [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-12T19:32:40,699 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T19:32:40,699 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:32:40,705 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T19:32:40,706 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T19:32:40,717 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T19:32:40,718 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:32:40,727 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T19:32:40,727 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T19:32:40,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T19:32:40,749 DEBUG [RS:0;81d69e608036:33067 {}] zookeeper.ZKUtil(111): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/81d69e608036,33067,1731439956493 2024-11-12T19:32:40,750 WARN [RS:0;81d69e608036:33067 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T19:32:40,750 INFO [RS:0;81d69e608036:33067 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T19:32:40,750 DEBUG [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/WALs/81d69e608036,33067,1731439956493 2024-11-12T19:32:40,753 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T19:32:40,753 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:32:40,757 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [81d69e608036,33067,1731439956493] 2024-11-12T19:32:40,757 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T19:32:40,760 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740 2024-11-12T19:32:40,761 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740 2024-11-12T19:32:40,783 DEBUG [PEWorker-1 {}] 
regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-12T19:32:40,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-12T19:32:40,811 DEBUG [RS:0;81d69e608036:33067 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-12T19:32:40,835 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T19:32:40,851 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67979743, jitterRate=0.012977108359336853}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T19:32:40,857 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-12T19:32:40,857 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-12T19:32:40,859 INFO [RS:0;81d69e608036:33067 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T19:32:40,857 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-12T19:32:40,861 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-12T19:32:40,862 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T19:32:40,863 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T19:32:40,870 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-12T19:32:40,870 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-12T19:32:40,884 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-12T19:32:40,884 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-12T19:32:40,901 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-12T19:32:40,904 INFO [RS:0;81d69e608036:33067 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T19:32:40,917 INFO [RS:0;81d69e608036:33067 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T19:32:40,918 INFO [RS:0;81d69e608036:33067 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
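The "Opened ... ConstantSizeRegionSplitPolicy{desiredMaxFileSize=..., jitterRate=...}" records above (68009338 with jitterRate≈0.0134 for master:store, 67979743 with ≈0.0130 for hbase:meta) are consistent with desiredMaxFileSize = base * (1 + jitterRate): dividing either value by (1 + jitterRate) recovers roughly 67108864 bytes, i.e. 64 MiB. Treat that relationship and the 64 MiB base as an inference from these particular numbers rather than documented behaviour; the check below just redoes the arithmetic.

```java
/**
 * Re-derives the base split size implied by the ConstantSizeRegionSplitPolicy
 * lines in the log: base ≈ desiredMaxFileSize / (1 + jitterRate).
 * Pure arithmetic on values copied from the log; nothing here is HBase API.
 */
public class SplitSizeJitterCheck {
  public static void main(String[] args) {
    long[] desired = {68_009_338L, 67_979_743L};
    double[] jitter = {0.013418108224868774, 0.012977108359336853};
    for (int i = 0; i < desired.length; i++) {
      double base = desired[i] / (1.0 + jitter[i]);
      System.out.printf("desired=%d jitter=%.6f -> base≈%.0f bytes (~%d MiB)%n",
          desired[i], jitter[i], base, Math.round(base / (1 << 20)));
    }
  }
}
```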
2024-11-12T19:32:40,919 INFO [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-12T19:32:40,923 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T19:32:40,931 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-12T19:32:40,936 INFO [RS:0;81d69e608036:33067 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T19:32:40,937 DEBUG [RS:0;81d69e608036:33067 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/81d69e608036:0, corePoolSize=1, maxPoolSize=1 2024-11-12T19:32:40,937 DEBUG [RS:0;81d69e608036:33067 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/81d69e608036:0, corePoolSize=1, maxPoolSize=1 2024-11-12T19:32:40,938 DEBUG [RS:0;81d69e608036:33067 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/81d69e608036:0, corePoolSize=1, maxPoolSize=1 2024-11-12T19:32:40,938 DEBUG [RS:0;81d69e608036:33067 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/81d69e608036:0, corePoolSize=1, maxPoolSize=1 2024-11-12T19:32:40,938 DEBUG [RS:0;81d69e608036:33067 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/81d69e608036:0, corePoolSize=1, maxPoolSize=1 2024-11-12T19:32:40,938 DEBUG [RS:0;81d69e608036:33067 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/81d69e608036:0, corePoolSize=2, maxPoolSize=2 2024-11-12T19:32:40,939 DEBUG [RS:0;81d69e608036:33067 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0, corePoolSize=1, maxPoolSize=1 2024-11-12T19:32:40,943 DEBUG [RS:0;81d69e608036:33067 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/81d69e608036:0, corePoolSize=1, maxPoolSize=1 2024-11-12T19:32:40,943 DEBUG [RS:0;81d69e608036:33067 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/81d69e608036:0, corePoolSize=1, maxPoolSize=1 2024-11-12T19:32:40,943 DEBUG [RS:0;81d69e608036:33067 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/81d69e608036:0, corePoolSize=1, maxPoolSize=1 2024-11-12T19:32:40,944 DEBUG [RS:0;81d69e608036:33067 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/81d69e608036:0, corePoolSize=1, maxPoolSize=1 2024-11-12T19:32:40,944 DEBUG [RS:0;81d69e608036:33067 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/81d69e608036:0, corePoolSize=3, maxPoolSize=3 2024-11-12T19:32:40,944 DEBUG [RS:0;81d69e608036:33067 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0, corePoolSize=3, maxPoolSize=3 2024-11-12T19:32:40,950 INFO [RS:0;81d69e608036:33067 {}] 
hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T19:32:40,950 INFO [RS:0;81d69e608036:33067 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T19:32:40,950 INFO [RS:0;81d69e608036:33067 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T19:32:40,953 INFO [RS:0;81d69e608036:33067 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T19:32:40,953 INFO [RS:0;81d69e608036:33067 {}] hbase.ChoreService(168): Chore ScheduledChore name=81d69e608036,33067,1731439956493-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T19:32:41,002 INFO [RS:0;81d69e608036:33067 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T19:32:41,005 INFO [RS:0;81d69e608036:33067 {}] hbase.ChoreService(168): Chore ScheduledChore name=81d69e608036,33067,1731439956493-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T19:32:41,043 INFO [RS:0;81d69e608036:33067 {}] regionserver.Replication(204): 81d69e608036,33067,1731439956493 started 2024-11-12T19:32:41,043 INFO [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(1767): Serving as 81d69e608036,33067,1731439956493, RpcServer on 81d69e608036/172.17.0.3:33067, sessionid=0x10131d2ab780001 2024-11-12T19:32:41,044 DEBUG [RS:0;81d69e608036:33067 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T19:32:41,045 DEBUG [RS:0;81d69e608036:33067 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 81d69e608036,33067,1731439956493 2024-11-12T19:32:41,045 DEBUG [RS:0;81d69e608036:33067 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '81d69e608036,33067,1731439956493' 2024-11-12T19:32:41,045 DEBUG [RS:0;81d69e608036:33067 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T19:32:41,049 DEBUG [RS:0;81d69e608036:33067 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T19:32:41,051 DEBUG [RS:0;81d69e608036:33067 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T19:32:41,052 DEBUG [RS:0;81d69e608036:33067 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T19:32:41,055 DEBUG [RS:0;81d69e608036:33067 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 81d69e608036,33067,1731439956493 2024-11-12T19:32:41,055 DEBUG [RS:0;81d69e608036:33067 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '81d69e608036,33067,1731439956493' 2024-11-12T19:32:41,055 DEBUG [RS:0;81d69e608036:33067 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T19:32:41,063 DEBUG [RS:0;81d69e608036:33067 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T19:32:41,071 DEBUG [RS:0;81d69e608036:33067 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T19:32:41,071 INFO [RS:0;81d69e608036:33067 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 
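The ChoreService records above register periodic chores (CompactionChecker and MemstoreFlusherChore every 1000 ms, nonceCleaner every 360000 ms, BrokenStoreFileCleaner every 21600000 ms, and so on), and the ExecutorService records start small fixed-size worker pools per event type. As a rough analogue of the chore pattern only, the sketch below schedules a recurring task at a fixed period with the plain JDK scheduler; HBase's own ScheduledChore/ChoreService classes are not used or imitated here.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

/**
 * JDK-only analogue of the "ScheduledChore name=CompactionChecker, period=1000,
 * unit=MILLISECONDS" pattern above: a named task run on a fixed period.
 * Illustrative; HBase uses its own ChoreService, not this scheduler.
 */
public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
    chores.scheduleAtFixedRate(
        () -> System.out.println("compaction check tick"),
        0, 1000, TimeUnit.MILLISECONDS);

    Thread.sleep(3_500);   // let a few ticks run, then shut down
    chores.shutdownNow();
  }
}
```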
2024-11-12T19:32:41,072 INFO [RS:0;81d69e608036:33067 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T19:32:41,082 WARN [81d69e608036:46265 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 2024-11-12T19:32:41,180 INFO [RS:0;81d69e608036:33067 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-12T19:32:41,186 INFO [RS:0;81d69e608036:33067 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=81d69e608036%2C33067%2C1731439956493, suffix=, logDir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/WALs/81d69e608036,33067,1731439956493, archiveDir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/oldWALs, maxLogs=32 2024-11-12T19:32:41,234 DEBUG [RS:0;81d69e608036:33067 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(599): When create output stream for /user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/WALs/81d69e608036,33067,1731439956493/81d69e608036%2C33067%2C1731439956493.1731439961189, exclude list is [], retry=0 2024-11-12T19:32:41,247 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43501,DS-f1c7dfe2-aaf6-4cdb-b091-3e481db414d8,DISK] 2024-11-12T19:32:41,283 INFO [RS:0;81d69e608036:33067 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/WALs/81d69e608036,33067,1731439956493/81d69e608036%2C33067%2C1731439956493.1731439961189 2024-11-12T19:32:41,287 DEBUG [RS:0;81d69e608036:33067 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44513:44513)] 2024-11-12T19:32:41,336 DEBUG [81d69e608036:46265 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-12T19:32:41,344 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:32:41,356 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 81d69e608036,33067,1731439956493, state=OPENING 2024-11-12T19:32:41,387 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-12T19:32:41,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T19:32:41,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T19:32:41,407 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T19:32:41,407 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T19:32:41,412 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, 
server=81d69e608036,33067,1731439956493}] 2024-11-12T19:32:41,610 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:41,613 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T19:32:41,625 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48550, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T19:32:41,664 INFO [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-12T19:32:41,665 INFO [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T19:32:41,665 INFO [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-12T19:32:41,691 INFO [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=81d69e608036%2C33067%2C1731439956493.meta, suffix=.meta, logDir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/WALs/81d69e608036,33067,1731439956493, archiveDir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/oldWALs, maxLogs=32 2024-11-12T19:32:41,724 DEBUG [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(599): When create output stream for /user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/WALs/81d69e608036,33067,1731439956493/81d69e608036%2C33067%2C1731439956493.meta.1731439961694.meta, exclude list is [], retry=0 2024-11-12T19:32:41,743 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43501,DS-f1c7dfe2-aaf6-4cdb-b091-3e481db414d8,DISK] 2024-11-12T19:32:41,759 INFO [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/WALs/81d69e608036,33067,1731439956493/81d69e608036%2C33067%2C1731439956493.meta.1731439961694.meta 2024-11-12T19:32:41,767 DEBUG [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44513:44513)] 2024-11-12T19:32:41,767 DEBUG [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-12T19:32:41,769 DEBUG [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-12T19:32:41,900 DEBUG [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-12T19:32:41,911 INFO 
[RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-12T19:32:41,918 DEBUG [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-12T19:32:41,918 DEBUG [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:32:41,918 DEBUG [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-12T19:32:41,919 DEBUG [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-12T19:32:41,935 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T19:32:41,944 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T19:32:41,944 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:32:41,947 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T19:32:41,948 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T19:32:41,951 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, 
single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T19:32:41,951 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:32:41,959 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T19:32:41,959 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T19:32:41,965 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T19:32:41,965 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:32:41,974 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T19:32:41,977 DEBUG [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740 2024-11-12T19:32:41,983 DEBUG [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740 2024-11-12T19:32:41,998 DEBUG [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
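The records above walk through assigning hbase:meta: the AssignmentManager picks the lone region server, RegionStateStore and MetaTableLocator record the OPENING location under /hbase/meta-region-server in ZooKeeper, and OpenRegionProcedure opens the region there. Once that open completes, a client can resolve the meta location through the normal API; the sketch below does that lookup with standard HBase 2.x client calls and assumes connection settings (quorum etc.) come from an hbase-site.xml on the classpath.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

/**
 * Sketch: ask the client API where hbase:meta is currently hosted, i.e. the
 * information the MetaTableLocator lines above publish to ZooKeeper.
 * Assumes a reachable cluster configured via hbase-site.xml.
 */
public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println("hbase:meta hosted on " + loc.getServerName());
    }
  }
}
```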
2024-11-12T19:32:42,010 DEBUG [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-12T19:32:42,017 INFO [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68615744, jitterRate=0.022454261779785156}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T19:32:42,021 DEBUG [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-12T19:32:42,033 INFO [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731439961602 2024-11-12T19:32:42,051 DEBUG [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-12T19:32:42,052 INFO [RS_OPEN_META-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-12T19:32:42,056 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:32:42,061 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 81d69e608036,33067,1731439956493, state=OPEN 2024-11-12T19:32:42,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T19:32:42,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T19:32:42,153 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T19:32:42,153 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T19:32:42,160 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-12T19:32:42,160 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=81d69e608036,33067,1731439956493 in 742 msec 2024-11-12T19:32:42,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-12T19:32:42,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.2610 sec 2024-11-12T19:32:42,186 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.8610 sec 2024-11-12T19:32:42,188 INFO [master/81d69e608036:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731439962188, 
completionTime=-1 2024-11-12T19:32:42,188 INFO [master/81d69e608036:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-12T19:32:42,189 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-12T19:32:42,242 DEBUG [hconnection-0xf069af2-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:32:42,247 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48562, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:32:42,263 INFO [master/81d69e608036:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-12T19:32:42,264 INFO [master/81d69e608036:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731440022263 2024-11-12T19:32:42,264 INFO [master/81d69e608036:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731440082264 2024-11-12T19:32:42,264 INFO [master/81d69e608036:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 75 msec 2024-11-12T19:32:42,344 INFO [master/81d69e608036:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=81d69e608036,46265,1731439955074-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T19:32:42,345 INFO [master/81d69e608036:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=81d69e608036,46265,1731439955074-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T19:32:42,345 INFO [master/81d69e608036:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=81d69e608036,46265,1731439955074-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T19:32:42,347 INFO [master/81d69e608036:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-81d69e608036:46265, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T19:32:42,348 INFO [master/81d69e608036:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-12T19:32:42,365 INFO [master/81d69e608036:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-12T19:32:42,368 DEBUG [master/81d69e608036:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-12T19:32:42,369 INFO [master/81d69e608036:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-12T19:32:42,377 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-12T19:32:42,382 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T19:32:42,384 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:32:42,395 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T19:32:42,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741835_1011 (size=358) 2024-11-12T19:32:42,873 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4793b237becb5eefe1e5fde3a3e5b617, NAME => 'hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8 2024-11-12T19:32:42,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741836_1012 (size=42) 2024-11-12T19:32:42,959 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:32:42,959 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 4793b237becb5eefe1e5fde3a3e5b617, disabling compactions & flushes 2024-11-12T19:32:42,960 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617. 2024-11-12T19:32:42,960 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617. 
2024-11-12T19:32:42,960 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617. after waiting 0 ms 2024-11-12T19:32:42,960 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617. 2024-11-12T19:32:42,960 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617. 2024-11-12T19:32:42,960 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4793b237becb5eefe1e5fde3a3e5b617: 2024-11-12T19:32:42,964 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T19:32:42,973 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1731439962965"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731439962965"}]},"ts":"1731439962965"} 2024-11-12T19:32:43,024 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-12T19:32:43,030 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T19:32:43,034 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731439963030"}]},"ts":"1731439963030"} 2024-11-12T19:32:43,045 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-12T19:32:43,138 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=4793b237becb5eefe1e5fde3a3e5b617, ASSIGN}] 2024-11-12T19:32:43,142 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=4793b237becb5eefe1e5fde3a3e5b617, ASSIGN 2024-11-12T19:32:43,144 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=4793b237becb5eefe1e5fde3a3e5b617, ASSIGN; state=OFFLINE, location=81d69e608036,33067,1731439956493; forceNewPlan=false, retain=false 2024-11-12T19:32:43,296 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=4793b237becb5eefe1e5fde3a3e5b617, regionState=OPENING, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:32:43,301 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 4793b237becb5eefe1e5fde3a3e5b617, server=81d69e608036,33067,1731439956493}] 2024-11-12T19:32:43,457 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
81d69e608036,33067,1731439956493 2024-11-12T19:32:43,469 INFO [RS_OPEN_PRIORITY_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617. 2024-11-12T19:32:43,469 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 4793b237becb5eefe1e5fde3a3e5b617, NAME => 'hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617.', STARTKEY => '', ENDKEY => ''} 2024-11-12T19:32:43,470 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 4793b237becb5eefe1e5fde3a3e5b617 2024-11-12T19:32:43,470 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:32:43,470 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 4793b237becb5eefe1e5fde3a3e5b617 2024-11-12T19:32:43,470 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 4793b237becb5eefe1e5fde3a3e5b617 2024-11-12T19:32:43,487 INFO [StoreOpener-4793b237becb5eefe1e5fde3a3e5b617-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4793b237becb5eefe1e5fde3a3e5b617 2024-11-12T19:32:43,510 INFO [StoreOpener-4793b237becb5eefe1e5fde3a3e5b617-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4793b237becb5eefe1e5fde3a3e5b617 columnFamilyName info 2024-11-12T19:32:43,511 DEBUG [StoreOpener-4793b237becb5eefe1e5fde3a3e5b617-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:32:43,519 INFO [StoreOpener-4793b237becb5eefe1e5fde3a3e5b617-1 {}] regionserver.HStore(327): Store=4793b237becb5eefe1e5fde3a3e5b617/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:32:43,538 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/namespace/4793b237becb5eefe1e5fde3a3e5b617 2024-11-12T19:32:43,539 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/namespace/4793b237becb5eefe1e5fde3a3e5b617 2024-11-12T19:32:43,567 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 4793b237becb5eefe1e5fde3a3e5b617 2024-11-12T19:32:43,585 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/namespace/4793b237becb5eefe1e5fde3a3e5b617/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T19:32:43,591 INFO [RS_OPEN_PRIORITY_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 4793b237becb5eefe1e5fde3a3e5b617; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65493549, jitterRate=-0.024070069193840027}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-12T19:32:43,593 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 4793b237becb5eefe1e5fde3a3e5b617: 2024-11-12T19:32:43,602 INFO [RS_OPEN_PRIORITY_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617., pid=6, masterSystemTime=1731439963457 2024-11-12T19:32:43,616 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617. 2024-11-12T19:32:43,616 INFO [RS_OPEN_PRIORITY_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617. 
2024-11-12T19:32:43,618 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=4793b237becb5eefe1e5fde3a3e5b617, regionState=OPEN, openSeqNum=2, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:32:43,658 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-12T19:32:43,661 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 4793b237becb5eefe1e5fde3a3e5b617, server=81d69e608036,33067,1731439956493 in 342 msec 2024-11-12T19:32:43,670 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-12T19:32:43,670 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=4793b237becb5eefe1e5fde3a3e5b617, ASSIGN in 520 msec 2024-11-12T19:32:43,673 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T19:32:43,674 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731439963673"}]},"ts":"1731439963673"} 2024-11-12T19:32:43,683 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-12T19:32:43,706 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-12T19:32:43,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T19:32:43,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-12T19:32:43,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T19:32:43,716 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T19:32:43,722 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.3470 sec 2024-11-12T19:32:43,764 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-12T19:32:43,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-12T19:32:43,828 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 66 msec 2024-11-12T19:32:43,847 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-12T19:32:43,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-12T19:32:43,893 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 45 msec 2024-11-12T19:32:43,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-12T19:32:43,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-12T19:32:43,953 INFO [master/81d69e608036:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 7.045sec 2024-11-12T19:32:43,955 INFO [master/81d69e608036:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-12T19:32:43,957 INFO [master/81d69e608036:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-12T19:32:43,959 INFO [master/81d69e608036:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-12T19:32:43,960 INFO [master/81d69e608036:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-12T19:32:43,960 INFO [master/81d69e608036:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-12T19:32:43,962 INFO [master/81d69e608036:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=81d69e608036,46265,1731439955074-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T19:32:43,963 INFO [master/81d69e608036:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=81d69e608036,46265,1731439955074-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-12T19:32:43,983 DEBUG [master/81d69e608036:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-12T19:32:43,984 INFO [master/81d69e608036:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-12T19:32:43,985 INFO [master/81d69e608036:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=81d69e608036,46265,1731439955074-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
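The two CreateNamespaceProcedure entries just above (pid=7 for 'default', pid=8 for 'hbase') are the master creating its built-in namespaces during initialization. A user-defined namespace goes through the same procedure when requested through the Admin API; a minimal sketch, assuming the HBase 2.x client API and a made-up namespace name, would be:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateNamespaceSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // "example_ns" is a hypothetical name used only for illustration; the request
          // is queued on the master as a CreateNamespaceProcedure, like pid=7/pid=8 above.
          admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
        }
      }
    }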
2024-11-12T19:32:44,063 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x033bfacb to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@24096af1 2024-11-12T19:32:44,064 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-12T19:32:44,111 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e9b1490, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:32:44,119 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-12T19:32:44,120 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-12T19:32:44,160 DEBUG [hconnection-0x5e1a7e74-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:32:44,181 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48570, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:32:44,198 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=81d69e608036,46265,1731439955074 2024-11-12T19:32:44,222 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=218, OpenFileDescriptor=446, MaxFileDescriptor=1048576, SystemLoadAverage=1283, ProcessCount=11, AvailableMemoryMB=3502 2024-11-12T19:32:44,240 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T19:32:44,244 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53782, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-12T19:32:44,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
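The "Minicluster is up" line and the ResourceChecker "before:" line above come from HBaseTestingUtility as it hands a freshly started single-node cluster to TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity. The test's own setup code is not part of this log; a minimal sketch of standing up and tearing down such a mini cluster with the same utility class might look like this (illustrative only, not the test's actual harness):

    import java.util.Arrays;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.client.Admin;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Starts mini DFS, mini ZooKeeper, one master and one regionserver,
        // producing startup chatter much like the log above.
        util.startMiniCluster();
        try {
          Admin admin = util.getAdmin();
          System.out.println("tables: " + Arrays.toString(admin.listTableNames()));
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }

The ZKConnectionRegistry deprecation warning at 19:32:44,064 only means this client still locates the cluster through ZooKeeper; the book section it links to describes the RPC-based connection registry that replaces it.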
2024-11-12T19:32:44,337 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T19:32:44,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-12T19:32:44,347 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T19:32:44,348 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:32:44,349 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-12T19:32:44,351 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T19:32:44,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-12T19:32:44,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741837_1013 (size=960) 2024-11-12T19:32:44,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-12T19:32:44,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-12T19:32:44,800 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8 2024-11-12T19:32:44,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741838_1014 (size=53) 2024-11-12T19:32:44,837 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:32:44,837 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 76d7848c1ddd620b84cb604cad3a693a, disabling compactions & flushes 2024-11-12T19:32:44,837 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:44,837 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:44,838 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. after waiting 0 ms 2024-11-12T19:32:44,838 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:44,838 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:44,838 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:44,842 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T19:32:44,843 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1731439964842"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731439964842"}]},"ts":"1731439964842"} 2024-11-12T19:32:44,847 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-11-12T19:32:44,850 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T19:32:44,851 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731439964850"}]},"ts":"1731439964850"} 2024-11-12T19:32:44,857 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-12T19:32:44,878 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=76d7848c1ddd620b84cb604cad3a693a, ASSIGN}] 2024-11-12T19:32:44,883 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=76d7848c1ddd620b84cb604cad3a693a, ASSIGN 2024-11-12T19:32:44,886 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=76d7848c1ddd620b84cb604cad3a693a, ASSIGN; state=OFFLINE, location=81d69e608036,33067,1731439956493; forceNewPlan=false, retain=false 2024-11-12T19:32:44,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-12T19:32:45,036 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=76d7848c1ddd620b84cb604cad3a693a, regionState=OPENING, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:32:45,041 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493}] 2024-11-12T19:32:45,197 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:45,204 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
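The create request logged at 19:32:44,337 spells out the whole TestAcidGuarantees descriptor: three column families A, B and C with a single version each, plus the table attribute hbase.hregion.compacting.memstore.type=BASIC that later shows up as CompactingMemStore with compactor=BASIC when the stores open. A client-side equivalent, as a hedged sketch against the HBase 2.x Admin API (the class name and flow here are illustrative, not the test's actual code):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestAcidGuaranteesSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // TABLE_ATTRIBUTES => METADATA from the create request above:
              .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
          for (String cf : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(cf))
                .setMaxVersions(1)   // VERSIONS => '1'
                .build());
          }
          // Runs as CreateTableProcedure pid=9 on the master (states PRE_OPERATION,
          // WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS, ... as logged above).
          admin.createTable(table.build());
        }
      }
    }

The 131072-byte MEMSTORE_FLUSHSIZE that TableDescriptorChecker warns about at 19:32:44,332 is presumably set by the test harness to force frequent flushes; whether it comes from the descriptor or from hbase.hregion.memstore.flush.size is not visible here, so it is deliberately left out of the sketch.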
2024-11-12T19:32:45,205 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} 2024-11-12T19:32:45,205 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:32:45,205 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:32:45,206 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:32:45,206 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:32:45,209 INFO [StoreOpener-76d7848c1ddd620b84cb604cad3a693a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:32:45,214 INFO [StoreOpener-76d7848c1ddd620b84cb604cad3a693a-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:32:45,214 INFO [StoreOpener-76d7848c1ddd620b84cb604cad3a693a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 76d7848c1ddd620b84cb604cad3a693a columnFamilyName A 2024-11-12T19:32:45,214 DEBUG [StoreOpener-76d7848c1ddd620b84cb604cad3a693a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:32:45,216 INFO [StoreOpener-76d7848c1ddd620b84cb604cad3a693a-1 {}] regionserver.HStore(327): Store=76d7848c1ddd620b84cb604cad3a693a/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:32:45,216 INFO [StoreOpener-76d7848c1ddd620b84cb604cad3a693a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:32:45,218 INFO [StoreOpener-76d7848c1ddd620b84cb604cad3a693a-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:32:45,219 INFO [StoreOpener-76d7848c1ddd620b84cb604cad3a693a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 76d7848c1ddd620b84cb604cad3a693a columnFamilyName B 2024-11-12T19:32:45,219 DEBUG [StoreOpener-76d7848c1ddd620b84cb604cad3a693a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:32:45,220 INFO [StoreOpener-76d7848c1ddd620b84cb604cad3a693a-1 {}] regionserver.HStore(327): Store=76d7848c1ddd620b84cb604cad3a693a/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:32:45,220 INFO [StoreOpener-76d7848c1ddd620b84cb604cad3a693a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:32:45,223 INFO [StoreOpener-76d7848c1ddd620b84cb604cad3a693a-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:32:45,224 INFO [StoreOpener-76d7848c1ddd620b84cb604cad3a693a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 76d7848c1ddd620b84cb604cad3a693a columnFamilyName C 2024-11-12T19:32:45,224 DEBUG [StoreOpener-76d7848c1ddd620b84cb604cad3a693a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:32:45,225 INFO [StoreOpener-76d7848c1ddd620b84cb604cad3a693a-1 {}] regionserver.HStore(327): Store=76d7848c1ddd620b84cb604cad3a693a/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:32:45,226 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:45,227 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:32:45,229 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:32:45,235 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-12T19:32:45,240 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:32:45,244 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T19:32:45,246 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 76d7848c1ddd620b84cb604cad3a693a; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68011417, jitterRate=0.013449087738990784}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T19:32:45,247 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:45,249 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., pid=11, masterSystemTime=1731439965197 2024-11-12T19:32:45,253 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:45,253 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
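With the region now open on 81d69e608036,33067, the test can locate it and drive flushes; the flush request shows up a few records below at 19:32:45,766 ("flush TestAcidGuarantees") and is executed as FlushTableProcedure pid=12 with a FlushRegionProcedure subprocedure (pid=13). Seen from the client, that interaction is roughly the following sketch (assuming the HBase 2.x Admin and RegionLocator APIs, not the test's literal code):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class FlushTestAcidGuaranteesSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin();
             RegionLocator locator = conn.getRegionLocator(table)) {
          // A single region with empty start/end keys, as created above.
          locator.getAllRegionLocations().forEach(loc ->
              System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName()));
          // Asks the cluster to flush every region of the table; server side this
          // corresponds to the FlushTableProcedure / FlushRegionProcedure activity in the log.
          admin.flush(table);
        }
      }
    }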
2024-11-12T19:32:45,254 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=76d7848c1ddd620b84cb604cad3a693a, regionState=OPEN, openSeqNum=2, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:32:45,263 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-12T19:32:45,267 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 in 217 msec 2024-11-12T19:32:45,271 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-12T19:32:45,271 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=76d7848c1ddd620b84cb604cad3a693a, ASSIGN in 385 msec 2024-11-12T19:32:45,273 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T19:32:45,273 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731439965273"}]},"ts":"1731439965273"} 2024-11-12T19:32:45,279 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-12T19:32:45,296 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T19:32:45,302 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 959 msec 2024-11-12T19:32:45,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-12T19:32:45,489 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-12T19:32:45,494 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f2052a7 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22ff9396 2024-11-12T19:32:45,504 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@565a0a51, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:32:45,507 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:32:45,510 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48586, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:32:45,515 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T19:32:45,518 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53790, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-11-12T19:32:45,533 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x30c68ddf to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6cd26d7b
2024-11-12T19:32:45,545 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76bfdcf5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-12T19:32:45,548 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2cac4303 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@536a4a58
2024-11-12T19:32:45,563 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39b10898, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-12T19:32:45,565 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x305f2915 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18751c86
2024-11-12T19:32:45,583 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29b132d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-12T19:32:45,589 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d7115de to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2dd0bbda
2024-11-12T19:32:45,639 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30d4d4c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-12T19:32:45,642 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2ec99212 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7c57419f
2024-11-12T19:32:45,678 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a33c837, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-12T19:32:45,680 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e6758ed to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@febc460
2024-11-12T19:32:45,702 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47c7c7c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-12T19:32:45,705 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f0c7188 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4e957ecd
2024-11-12T19:32:45,720 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37950159, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-12T19:32:45,723 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22daddc4 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d5a9f0f
2024-11-12T19:32:45,734 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6704743, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-12T19:32:45,736 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x50c9c1d1 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39028e20
2024-11-12T19:32:45,745 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-12T19:32:45,745 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d4c9c1c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-12T19:32:45,745 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-12T19:32:45,755 DEBUG [hconnection-0x3c3d0d5c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-12T19:32:45,759 DEBUG [hconnection-0x69194a26-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-12T19:32:45,762 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace
2024-11-12T19:32:45,762 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48598, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-12T19:32:45,762 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer
2024-11-12T19:32:45,766 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees
2024-11-12T19:32:45,768 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48606, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-12T19:32:45,769 DEBUG [hconnection-0x5fc4f1a6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-12T19:32:45,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees
2024-11-12T19:32:45,775 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-12T19:32:45,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-11-12T19:32:45,778 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48610, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-12T19:32:45,778 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-12T19:32:45,779 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-12T19:32:45,779 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-12T19:32:45,779 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-12T19:32:45,779 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-12T19:32:45,780 DEBUG [hconnection-0x3740e823-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-12T19:32:45,780 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-12T19:32:45,783 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees
2024-11-12T19:32:45,783 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer
2024-11-12T19:32:45,787 DEBUG [hconnection-0x1bfd3810-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-12T19:32:45,792 DEBUG [hconnection-0x45f0f0a5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-12T19:32:45,803 DEBUG [hconnection-0x21675e13-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-12T19:32:45,807 DEBUG [hconnection-0x62bc4cf2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-12T19:32:45,807 DEBUG [hconnection-0x115167a0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-12T19:32:45,815 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48626, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-12T19:32:45,819 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48638, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-12T19:32:45,825 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48640, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-12T19:32:45,826 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48646, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-12T19:32:45,839 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48658, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-12T19:32:45,876 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48672, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-12T19:32:45,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-11-12T19:32:45,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a
2024-11-12T19:32:45,896 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-12T19:32:45,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A
2024-11-12T19:32:45,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:32:45,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B
2024-11-12T19:32:45,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:32:45,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C
2024-11-12T19:32:45,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:32:45,945 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:32:45,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13
2024-11-12T19:32:45,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.
2024-11-12T19:32:45,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing
2024-11-12T19:32:45,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.
2024-11-12T19:32:45,970 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13
java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:32:45,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13
java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
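The flush above is driven from the test client: the master logs "Client=jenkins//172.17.0.3 flush TestAcidGuarantees", stores FlushTableProcedure pid=12, and dispatches a FlushRegionProcedure (pid=13) to the region server, which refuses with "Unable to complete flush" because the MemStoreFlusher is already flushing that region; the master then re-dispatches pid=13 until the in-flight flush finishes. For reference, a minimal sketch of how such a table flush is requested through the public HBase 2.x client API; the quorum address and any blocking behaviour of the call are assumptions for illustration, not taken from this run's configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative quorum address; the test runs against its own mini-cluster.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; on the master this
          // shows up as a FlushTableProcedure with FlushRegionProcedure subprocedures,
          // matching the pid=12 / pid=13 entries in the log above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }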
2024-11-12T19:32:46,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:46,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-12T19:32:46,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/abbf0f7982344ed19cf378f41ab9cd9e is 50, key is test_row_0/A:col10/1731439965878/Put/seqid=0 2024-11-12T19:32:46,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440026090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440026079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,116 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440026099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440026102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440026114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741839_1015 (size=12001) 2024-11-12T19:32:46,144 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/abbf0f7982344ed19cf378f41ab9cd9e 2024-11-12T19:32:46,156 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:46,158 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-12T19:32:46,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:46,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:46,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:46,174 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:46,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:46,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:46,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440026263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440026264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440026265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440026265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,272 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440026265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,306 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/d8edb657ee6e4630b9a05892602947f3 is 50, key is test_row_0/B:col10/1731439965878/Put/seqid=0 2024-11-12T19:32:46,333 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:46,334 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-12T19:32:46,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:46,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:46,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:46,344 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:46,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:46,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:46,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741840_1016 (size=12001) 2024-11-12T19:32:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-12T19:32:46,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440026474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,480 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440026478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440026480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440026477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440026481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,499 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:46,500 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-12T19:32:46,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:46,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:46,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:46,501 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
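The repeated RegionTooBusyException entries above are write backpressure: HRegion.checkResources() rejects new mutations once the region's memstore exceeds its blocking limit (reported here as 512.0 K, which is consistent with a reduced flush size times the default block multiplier of 4), and the client is expected to back off and retry until the memstore flush completes. A rough sketch of the knobs involved, using the standard client API; the property values are illustrative and only the client-side ones take effect from a client Configuration (the memstore properties matter where the region server is started, e.g. a mini-cluster sharing this Configuration).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackpressurePutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Server-side: writes block once memstore size > flush.size * block.multiplier.
        // 128 KB * 4 = 512 KB, consistent with the limit reported in this log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        // Client-side: RegionTooBusyException is retried with backoff within these budgets.
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.operation.timeout", 60_000L);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // Retried internally while the region keeps reporting RegionTooBusyException.
          table.put(put);
        }
      }
    }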
2024-11-12T19:32:46,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:46,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:46,667 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:46,669 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-12T19:32:46,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:46,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:46,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:46,671 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:46,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:46,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:46,775 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/d8edb657ee6e4630b9a05892602947f3 2024-11-12T19:32:46,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440026781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440026788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440026788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,795 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440026791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:46,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440026792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:46,824 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-12T19:32:46,826 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-12T19:32:46,827 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-12T19:32:46,839 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:46,843 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-12T19:32:46,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:46,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:46,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:46,844 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:46,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:46,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:46,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-12T19:32:46,911 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/cb60be88054a4dfa986eb3d02d810764 is 50, key is test_row_0/C:col10/1731439965878/Put/seqid=0 2024-11-12T19:32:46,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741841_1017 (size=12001) 2024-11-12T19:32:46,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/cb60be88054a4dfa986eb3d02d810764 2024-11-12T19:32:47,023 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:47,024 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-12T19:32:47,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:47,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:47,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:47,025 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:47,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:47,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/abbf0f7982344ed19cf378f41ab9cd9e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/abbf0f7982344ed19cf378f41ab9cd9e 2024-11-12T19:32:47,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:32:47,074 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/abbf0f7982344ed19cf378f41ab9cd9e, entries=150, sequenceid=13, filesize=11.7 K
2024-11-12T19:32:47,082 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders
2024-11-12T19:32:47,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/d8edb657ee6e4630b9a05892602947f3 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/d8edb657ee6e4630b9a05892602947f3
2024-11-12T19:32:47,137 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/d8edb657ee6e4630b9a05892602947f3, entries=150, sequenceid=13, filesize=11.7 K
2024-11-12T19:32:47,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/cb60be88054a4dfa986eb3d02d810764 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/cb60be88054a4dfa986eb3d02d810764
2024-11-12T19:32:47,164 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/cb60be88054a4dfa986eb3d02d810764, entries=150, sequenceid=13, filesize=11.7 K
2024-11-12T19:32:47,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 76d7848c1ddd620b84cb604cad3a693a in 1280ms, sequenceid=13, compaction requested=false
2024-11-12T19:32:47,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a:
2024-11-12T19:32:47,195 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:32:47,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13
2024-11-12T19:32:47,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.
2024-11-12T19:32:47,196 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB
2024-11-12T19:32:47,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A
2024-11-12T19:32:47,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:32:47,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B
2024-11-12T19:32:47,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:32:47,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C
2024-11-12T19:32:47,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:32:47,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/48106f0056ba4091856ffac5e0f51507 is 50, key is test_row_0/A:col10/1731439966105/Put/seqid=0
2024-11-12T19:32:47,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741842_1018 (size=12001)
2024-11-12T19:32:47,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a
2024-11-12T19:32:47,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing
2024-11-12T19:32:47,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:47,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440027361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:47,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:47,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440027366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:47,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:47,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440027368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:47,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:47,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440027376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:47,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:47,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440027378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:47,489 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-12T19:32:47,502 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:47,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440027492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:47,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:47,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440027492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:47,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:47,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440027492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:47,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:47,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:47,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440027500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:47,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440027500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:47,670 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/48106f0056ba4091856ffac5e0f51507 2024-11-12T19:32:47,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:47,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440027708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:47,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:47,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440027709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:47,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:47,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440027711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:47,725 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:47,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440027715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:47,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:47,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440027716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:47,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/5842002a142d4ca984231d1b5bdb4e6a is 50, key is test_row_0/B:col10/1731439966105/Put/seqid=0 2024-11-12T19:32:47,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741843_1019 (size=12001) 2024-11-12T19:32:47,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-12T19:32:48,027 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:48,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440028026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:48,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:48,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440028026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:48,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:48,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440028029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:48,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:48,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440028033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:48,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:48,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440028037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:48,194 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/5842002a142d4ca984231d1b5bdb4e6a 2024-11-12T19:32:48,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/9df159fbed6e431c9029d6ed8ecb9347 is 50, key is test_row_0/C:col10/1731439966105/Put/seqid=0 2024-11-12T19:32:48,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741844_1020 (size=12001) 2024-11-12T19:32:48,265 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/9df159fbed6e431c9029d6ed8ecb9347 2024-11-12T19:32:48,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/48106f0056ba4091856ffac5e0f51507 as 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/48106f0056ba4091856ffac5e0f51507 2024-11-12T19:32:48,302 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/48106f0056ba4091856ffac5e0f51507, entries=150, sequenceid=37, filesize=11.7 K 2024-11-12T19:32:48,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/5842002a142d4ca984231d1b5bdb4e6a as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/5842002a142d4ca984231d1b5bdb4e6a 2024-11-12T19:32:48,322 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/5842002a142d4ca984231d1b5bdb4e6a, entries=150, sequenceid=37, filesize=11.7 K 2024-11-12T19:32:48,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/9df159fbed6e431c9029d6ed8ecb9347 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/9df159fbed6e431c9029d6ed8ecb9347 2024-11-12T19:32:48,341 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/9df159fbed6e431c9029d6ed8ecb9347, entries=150, sequenceid=37, filesize=11.7 K 2024-11-12T19:32:48,347 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 76d7848c1ddd620b84cb604cad3a693a in 1150ms, sequenceid=37, compaction requested=false 2024-11-12T19:32:48,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:48,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
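The RegionTooBusyException warnings throughout this stretch all originate from HRegion.checkResources(), which rejects writes while the region's memstore is over its blocking threshold; the flush that has just committed the A, B, and C store files frees memory, but the writers refill the memstore quickly enough that the exceptions resume shortly afterwards. In stock HBase the blocking threshold is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and the 512.0 K limit reported in these messages suggests the test harness shrinks those values drastically to exercise this code path. The Java sketch below illustrates, under assumed values, how such a configuration could be wired up and how a writer behaves against it; the sizes, retry counts, and cell value are illustrative assumptions, while the table, row, and column names are taken from the log above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstoreBlockingSketch {

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();

        // Standard HBase keys. The small sizes are assumptions chosen so that
        // flush.size * block.multiplier = 512 K, matching the limit in the log;
        // the defaults are far larger (128 MB times 4). These are region-level
        // settings: in a real deployment they belong in the servers' hbase-site.xml
        // (or per-table), but a mini-cluster test can pass them through its shared
        // Configuration.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        // Client-side retry settings (assumed values): RegionTooBusyException is not a
        // DoNotRetryIOException, so the client retries it, and generous retry limits
        // let writers ride out blocking periods while flushes catch up.
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 200);

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // Retried internally; throws an IOException only once the configured
            // retries are exhausted while the region stays blocked.
            table.put(put);
        }
    }
}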
2024-11-12T19:32:48,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-12T19:32:48,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-12T19:32:48,356 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-12T19:32:48,357 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5690 sec 2024-11-12T19:32:48,378 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 2.5910 sec 2024-11-12T19:32:48,548 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-12T19:32:48,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:32:48,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A 2024-11-12T19:32:48,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:48,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B 2024-11-12T19:32:48,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:48,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C 2024-11-12T19:32:48,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:48,578 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/07fc215acdf64657a1ab5ccb84ca050b is 50, key is test_row_0/A:col10/1731439968540/Put/seqid=0 2024-11-12T19:32:48,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741845_1021 (size=14341) 2024-11-12T19:32:48,632 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/07fc215acdf64657a1ab5ccb84ca050b 2024-11-12T19:32:48,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:48,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440028631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:48,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:48,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440028638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:48,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:48,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440028658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:48,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:48,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440028658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:48,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:48,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440028659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:48,689 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/bd9ef219c7884e2886f0c43acc4e5b32 is 50, key is test_row_0/B:col10/1731439968540/Put/seqid=0 2024-11-12T19:32:48,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741846_1022 (size=12001) 2024-11-12T19:32:48,729 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/bd9ef219c7884e2886f0c43acc4e5b32 2024-11-12T19:32:48,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:48,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440028763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:48,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/fb99e15af6d442bfb168a22becddadb8 is 50, key is test_row_0/C:col10/1731439968540/Put/seqid=0 2024-11-12T19:32:48,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:48,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440028763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:48,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:48,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440028784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:48,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:48,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440028788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:48,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:48,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440028783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:48,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741847_1023 (size=12001) 2024-11-12T19:32:48,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:48,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:48,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440028979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:48,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440028979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:49,008 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:49,008 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:49,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440029000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:49,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440029004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:49,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:49,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440029007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:49,237 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/fb99e15af6d442bfb168a22becddadb8 2024-11-12T19:32:49,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/07fc215acdf64657a1ab5ccb84ca050b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/07fc215acdf64657a1ab5ccb84ca050b 2024-11-12T19:32:49,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:49,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:49,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440029300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:49,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440029301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:49,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:49,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440029314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:49,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:49,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440029315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:49,328 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/07fc215acdf64657a1ab5ccb84ca050b, entries=200, sequenceid=52, filesize=14.0 K 2024-11-12T19:32:49,339 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/bd9ef219c7884e2886f0c43acc4e5b32 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/bd9ef219c7884e2886f0c43acc4e5b32 2024-11-12T19:32:49,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:49,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440029319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:49,401 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/bd9ef219c7884e2886f0c43acc4e5b32, entries=150, sequenceid=52, filesize=11.7 K 2024-11-12T19:32:49,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/fb99e15af6d442bfb168a22becddadb8 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/fb99e15af6d442bfb168a22becddadb8 2024-11-12T19:32:49,458 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/fb99e15af6d442bfb168a22becddadb8, entries=150, sequenceid=52, filesize=11.7 K 2024-11-12T19:32:49,470 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 76d7848c1ddd620b84cb604cad3a693a in 921ms, sequenceid=52, compaction requested=true 2024-11-12T19:32:49,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:49,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:32:49,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:32:49,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:32:49,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:32:49,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:C, priority=-2147483648, current under 
compaction store size is 3 2024-11-12T19:32:49,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:32:49,484 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:32:49,485 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:32:49,494 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:32:49,496 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/B is initiating minor compaction (all files) 2024-11-12T19:32:49,496 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/B in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:49,497 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/d8edb657ee6e4630b9a05892602947f3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/5842002a142d4ca984231d1b5bdb4e6a, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/bd9ef219c7884e2886f0c43acc4e5b32] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=35.2 K 2024-11-12T19:32:49,498 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:32:49,498 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/A is initiating minor compaction (all files) 2024-11-12T19:32:49,498 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/A in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
2024-11-12T19:32:49,498 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/abbf0f7982344ed19cf378f41ab9cd9e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/48106f0056ba4091856ffac5e0f51507, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/07fc215acdf64657a1ab5ccb84ca050b] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=37.4 K 2024-11-12T19:32:49,512 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting abbf0f7982344ed19cf378f41ab9cd9e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731439965855 2024-11-12T19:32:49,513 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting d8edb657ee6e4630b9a05892602947f3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731439965855 2024-11-12T19:32:49,513 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48106f0056ba4091856ffac5e0f51507, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731439966015 2024-11-12T19:32:49,516 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 07fc215acdf64657a1ab5ccb84ca050b, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731439967356 2024-11-12T19:32:49,517 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 5842002a142d4ca984231d1b5bdb4e6a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731439966015 2024-11-12T19:32:49,529 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting bd9ef219c7884e2886f0c43acc4e5b32, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731439967364 2024-11-12T19:32:49,618 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#A#compaction#9 average throughput is 0.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:32:49,619 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/35c96e4f9f124a28a942c6be5680a9be is 50, key is test_row_0/A:col10/1731439968540/Put/seqid=0 2024-11-12T19:32:49,665 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#B#compaction#10 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:32:49,666 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/25929b3e769148cea2d5430b1888263e is 50, key is test_row_0/B:col10/1731439968540/Put/seqid=0 2024-11-12T19:32:49,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741848_1024 (size=12104) 2024-11-12T19:32:49,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741849_1025 (size=12104) 2024-11-12T19:32:49,811 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/25929b3e769148cea2d5430b1888263e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/25929b3e769148cea2d5430b1888263e 2024-11-12T19:32:49,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-12T19:32:49,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A 2024-11-12T19:32:49,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:49,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B 2024-11-12T19:32:49,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:49,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C 2024-11-12T19:32:49,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:49,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:32:49,856 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/5e571d64ae784de09638afbf0aab5fbc is 50, key is test_row_0/A:col10/1731439968641/Put/seqid=0 2024-11-12T19:32:49,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-12T19:32:49,891 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-12T19:32:49,895 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:32:49,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-12T19:32:49,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-12T19:32:49,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741850_1026 (size=14341) 2024-11-12T19:32:49,915 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:32:49,918 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/5e571d64ae784de09638afbf0aab5fbc 2024-11-12T19:32:49,923 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:32:49,923 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:32:49,975 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/631b9f84ab7d44349993393747dab717 is 50, key is test_row_0/B:col10/1731439968641/Put/seqid=0 2024-11-12T19:32:49,993 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/B of 76d7848c1ddd620b84cb604cad3a693a into 25929b3e769148cea2d5430b1888263e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:32:49,993 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:49,993 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/B, priority=13, startTime=1731439969483; duration=0sec 2024-11-12T19:32:49,993 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:32:49,994 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:B 2024-11-12T19:32:49,994 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:32:49,997 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:32:49,997 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/C is initiating minor compaction (all files) 2024-11-12T19:32:49,997 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/C in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:49,997 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/cb60be88054a4dfa986eb3d02d810764, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/9df159fbed6e431c9029d6ed8ecb9347, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/fb99e15af6d442bfb168a22becddadb8] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=35.2 K 2024-11-12T19:32:49,998 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting cb60be88054a4dfa986eb3d02d810764, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731439965855 2024-11-12T19:32:50,000 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 9df159fbed6e431c9029d6ed8ecb9347, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731439966015 2024-11-12T19:32:50,005 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting fb99e15af6d442bfb168a22becddadb8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731439967364 2024-11-12T19:32:50,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=14 2024-11-12T19:32:50,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741851_1027 (size=12001) 2024-11-12T19:32:50,019 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/631b9f84ab7d44349993393747dab717 2024-11-12T19:32:50,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440030005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440030018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,026 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440030004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,039 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440030024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,043 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440030031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,074 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#C#compaction#13 average throughput is 0.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:32:50,075 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/60f74d7ecd224644a1587adc44c0520d is 50, key is test_row_0/C:col10/1731439968540/Put/seqid=0 2024-11-12T19:32:50,091 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/4c8946d6fdf142cfbd77e197992fb179 is 50, key is test_row_0/C:col10/1731439968641/Put/seqid=0 2024-11-12T19:32:50,092 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:50,093 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-12T19:32:50,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:50,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:50,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:50,094 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:32:50,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:50,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:50,131 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/35c96e4f9f124a28a942c6be5680a9be as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/35c96e4f9f124a28a942c6be5680a9be 2024-11-12T19:32:50,152 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/A of 76d7848c1ddd620b84cb604cad3a693a into 35c96e4f9f124a28a942c6be5680a9be(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:32:50,152 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:50,152 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/A, priority=13, startTime=1731439969471; duration=0sec 2024-11-12T19:32:50,153 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:32:50,153 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:A 2024-11-12T19:32:50,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741852_1028 (size=12104) 2024-11-12T19:32:50,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440030142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440030143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440030143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,157 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440030144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440030146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741853_1029 (size=12001) 2024-11-12T19:32:50,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-12T19:32:50,251 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:50,251 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-12T19:32:50,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:50,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:50,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
2024-11-12T19:32:50,254 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:50,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:50,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:50,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440030362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,383 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440030380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440030381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440030383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440030383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,410 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:50,411 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-12T19:32:50,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:50,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:50,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:50,412 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:50,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:50,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:50,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-12T19:32:50,583 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:50,587 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-12T19:32:50,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:50,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:50,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:50,587 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:50,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:50,591 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/4c8946d6fdf142cfbd77e197992fb179 2024-11-12T19:32:50,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:50,612 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/60f74d7ecd224644a1587adc44c0520d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/60f74d7ecd224644a1587adc44c0520d 2024-11-12T19:32:50,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/5e571d64ae784de09638afbf0aab5fbc as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/5e571d64ae784de09638afbf0aab5fbc 2024-11-12T19:32:50,682 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/5e571d64ae784de09638afbf0aab5fbc, entries=200, sequenceid=74, filesize=14.0 K 2024-11-12T19:32:50,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/631b9f84ab7d44349993393747dab717 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/631b9f84ab7d44349993393747dab717 2024-11-12T19:32:50,690 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/C of 76d7848c1ddd620b84cb604cad3a693a into 60f74d7ecd224644a1587adc44c0520d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:32:50,690 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:50,690 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/C, priority=13, startTime=1731439969484; duration=0sec 2024-11-12T19:32:50,691 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:32:50,691 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:C 2024-11-12T19:32:50,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440030680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440030693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440030695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440030695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,716 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:50,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440030696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:50,724 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/631b9f84ab7d44349993393747dab717, entries=150, sequenceid=74, filesize=11.7 K 2024-11-12T19:32:50,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/4c8946d6fdf142cfbd77e197992fb179 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/4c8946d6fdf142cfbd77e197992fb179 2024-11-12T19:32:50,747 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:50,747 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-12T19:32:50,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:50,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:50,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:50,748 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:50,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:50,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:50,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/4c8946d6fdf142cfbd77e197992fb179, entries=150, sequenceid=74, filesize=11.7 K 2024-11-12T19:32:50,787 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 76d7848c1ddd620b84cb604cad3a693a in 963ms, sequenceid=74, compaction requested=false 2024-11-12T19:32:50,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:50,907 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:50,910 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-12T19:32:50,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
2024-11-12T19:32:50,911 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB
2024-11-12T19:32:50,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A
2024-11-12T19:32:50,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:32:50,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B
2024-11-12T19:32:50,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:32:50,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C
2024-11-12T19:32:50,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:32:50,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/15c2737e1dec4f8b8968c77df10e3460 is 50, key is test_row_0/A:col10/1731439970023/Put/seqid=0
2024-11-12T19:32:50,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741854_1030 (size=12001)
2024-11-12T19:32:50,994 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/15c2737e1dec4f8b8968c77df10e3460
2024-11-12T19:32:51,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14
2024-11-12T19:32:51,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/7fb5610d3c10482d818eb5a728a315ae is 50, key is test_row_0/B:col10/1731439970023/Put/seqid=0
2024-11-12T19:32:51,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741855_1031 (size=12001)
2024-11-12T19:32:51,068 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/7fb5610d3c10482d818eb5a728a315ae
2024-11-12T19:32:51,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/6f8954daf67e4df494af9f5eefe7ec43 is 50, key is test_row_0/C:col10/1731439970023/Put/seqid=0
2024-11-12T19:32:51,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741856_1032 (size=12001)
2024-11-12T19:32:51,160 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/6f8954daf67e4df494af9f5eefe7ec43
2024-11-12T19:32:51,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/15c2737e1dec4f8b8968c77df10e3460 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/15c2737e1dec4f8b8968c77df10e3460
2024-11-12T19:32:51,197 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/15c2737e1dec4f8b8968c77df10e3460, entries=150, sequenceid=91, filesize=11.7 K
2024-11-12T19:32:51,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/7fb5610d3c10482d818eb5a728a315ae as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/7fb5610d3c10482d818eb5a728a315ae
2024-11-12T19:32:51,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing
2024-11-12T19:32:51,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a
2024-11-12T19:32:51,219 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/7fb5610d3c10482d818eb5a728a315ae, entries=150, sequenceid=91, filesize=11.7 K
2024-11-12T19:32:51,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/6f8954daf67e4df494af9f5eefe7ec43 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/6f8954daf67e4df494af9f5eefe7ec43
2024-11-12T19:32:51,247 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/6f8954daf67e4df494af9f5eefe7ec43, entries=150, sequenceid=91, filesize=11.7 K
2024-11-12T19:32:51,249 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=40.25 KB/41220 for 76d7848c1ddd620b84cb604cad3a693a in 337ms, sequenceid=91, compaction requested=true
2024-11-12T19:32:51,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a:
2024-11-12T19:32:51,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.
2024-11-12T19:32:51,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15
2024-11-12T19:32:51,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=15
2024-11-12T19:32:51,275 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14
2024-11-12T19:32:51,275 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3450 sec
2024-11-12T19:32:51,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-12T19:32:51,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a
2024-11-12T19:32:51,279 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.3810 sec
2024-11-12T19:32:51,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A
2024-11-12T19:32:51,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:32:51,280 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B
2024-11-12T19:32:51,280 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:32:51,280 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C
2024-11-12T19:32:51,280 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:32:51,309 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/b6337f5a5d264843b04ead124a9e055c is 50, key is test_row_0/A:col10/1731439971265/Put/seqid=0
2024-11-12T19:32:51,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741857_1033 (size=12001)
2024-11-12T19:32:51,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:51,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440031414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:51,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:51,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440031418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:51,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:51,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440031421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:51,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:51,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440031426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:51,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:51,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440031427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:51,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:51,544 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:51,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440031532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:51,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440031531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:51,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:51,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440031533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:51,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:51,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440031534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:51,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:51,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440031535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:51,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:51,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:51,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440031750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:51,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440031749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:51,755 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:51,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440031751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:51,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:51,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440031749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:51,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:51,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440031757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:51,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=103 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/b6337f5a5d264843b04ead124a9e055c 2024-11-12T19:32:51,797 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/3a33e9f7ebdb402bb894fb8d8f9e9ccd is 50, key is test_row_0/B:col10/1731439971265/Put/seqid=0 2024-11-12T19:32:51,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741858_1034 (size=12001) 2024-11-12T19:32:51,859 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=103 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/3a33e9f7ebdb402bb894fb8d8f9e9ccd 2024-11-12T19:32:51,894 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/b1ceace1e43f4fdbafca2fe8e2c2abb3 is 50, key is test_row_0/C:col10/1731439971265/Put/seqid=0 2024-11-12T19:32:51,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741859_1035 (size=12001) 2024-11-12T19:32:52,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-12T19:32:52,013 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-12T19:32:52,026 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 
{}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:32:52,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-12T19:32:52,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-12T19:32:52,037 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:32:52,039 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:32:52,039 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:32:52,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:52,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440032067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:52,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:52,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440032068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:52,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:52,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440032069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:52,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:52,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440032069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:52,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:52,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440032075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:52,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-12T19:32:52,195 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:52,199 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-12T19:32:52,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:52,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:52,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:52,200 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:52,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:52,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:52,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-12T19:32:52,367 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:52,370 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=103 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/b1ceace1e43f4fdbafca2fe8e2c2abb3 2024-11-12T19:32:52,382 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-12T19:32:52,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:52,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:52,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:52,383 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
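[Illustrative aside, not part of the log] The entries above show a second client-requested flush of TestAcidGuarantees (pid=16, subprocedure pid=17) arriving while the region is still flushing from the earlier request, so FlushRegionCallable keeps failing with "Unable to complete flush ... as already flushing" until the in-flight flush finishes. A minimal sketch of how such a table flush is typically issued from client code is below; only the table name comes from the log, and the configuration and connection handling are assumptions, not how this test actually drives the flush.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();          // picks up hbase-site.xml from the classpath (assumed setup)
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush the table; the log records such requests
            // as FlushTableProcedure instances (procId 14 and 16 above).
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}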
2024-11-12T19:32:52,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:52,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:52,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/b6337f5a5d264843b04ead124a9e055c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/b6337f5a5d264843b04ead124a9e055c 2024-11-12T19:32:52,464 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/b6337f5a5d264843b04ead124a9e055c, entries=150, sequenceid=103, filesize=11.7 K 2024-11-12T19:32:52,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/3a33e9f7ebdb402bb894fb8d8f9e9ccd as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/3a33e9f7ebdb402bb894fb8d8f9e9ccd 2024-11-12T19:32:52,498 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/3a33e9f7ebdb402bb894fb8d8f9e9ccd, entries=150, sequenceid=103, filesize=11.7 K 2024-11-12T19:32:52,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/b1ceace1e43f4fdbafca2fe8e2c2abb3 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/b1ceace1e43f4fdbafca2fe8e2c2abb3 2024-11-12T19:32:52,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/b1ceace1e43f4fdbafca2fe8e2c2abb3, entries=150, sequenceid=103, filesize=11.7 K 2024-11-12T19:32:52,541 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:52,542 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-12T19:32:52,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:52,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
as already flushing 2024-11-12T19:32:52,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:52,543 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:52,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
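[Illustrative aside, not part of the log] While that flush was in flight, every incoming Mutate above was rejected with RegionTooBusyException ("Over memstore limit=512.0 K"). A rough sketch of a writer that backs off on that exception follows. The table, row, and column names mirror the log, but the retry count, backoff values, and the direct catch are assumptions; the stock HBase client normally retries this exception internally and would usually surface a retries-exhausted error instead.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                       // assumed starting backoff
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);                     // write accepted once the memstore drains
                    break;
                } catch (RegionTooBusyException busy) {
                    // Region is above its memstore blocking limit (as in the log above);
                    // wait for the in-flight flush to make progress, then retry.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;                     // simple exponential backoff (assumption)
                }
            }
        }
    }
}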
2024-11-12T19:32:52,545 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 76d7848c1ddd620b84cb604cad3a693a in 1269ms, sequenceid=103, compaction requested=true 2024-11-12T19:32:52,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:52,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:32:52,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:32:52,545 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:32:52,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:32:52,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:32:52,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:32:52,546 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:32:52,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:32:52,549 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50447 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:32:52,549 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/A is initiating minor compaction (all files) 2024-11-12T19:32:52,549 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/A in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
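[Illustrative aside, not part of the log] The short-compactions thread above selects all four A-family store files (~49.3 K total) for a minor compaction after "considering 3 permutations with 3 in ratio". The sketch below illustrates the size-ratio check that this kind of size-based selection rests on: a candidate set is acceptable when no single file is much larger than the rest combined. The 1.2 ratio and the byte sizes are assumptions for illustration, not values read from this cluster's configuration or from HBase's actual policy code.

import java.util.List;

public class RatioCheckExample {
    /** True when every file is no larger than ratio * (sum of the other files). */
    static boolean selectionIsBalanced(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > ratio * (total - size)) {
                return false;   // one file dominates; skip this candidate set
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Rough sizes (bytes) of the four A-store files listed below: ~11.8 K, 14.0 K, 11.7 K, 11.7 K.
        List<Long> sizes = List.of(12_084L, 14_336L, 11_980L, 12_001L);
        System.out.println(selectionIsBalanced(sizes, 1.2));   // prints: true
    }
}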
2024-11-12T19:32:52,549 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/35c96e4f9f124a28a942c6be5680a9be, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/5e571d64ae784de09638afbf0aab5fbc, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/15c2737e1dec4f8b8968c77df10e3460, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/b6337f5a5d264843b04ead124a9e055c] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=49.3 K 2024-11-12T19:32:52,551 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35c96e4f9f124a28a942c6be5680a9be, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731439967364 2024-11-12T19:32:52,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:52,553 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e571d64ae784de09638afbf0aab5fbc, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1731439968616 2024-11-12T19:32:52,554 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:32:52,554 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/B is initiating minor compaction (all files) 2024-11-12T19:32:52,554 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/B in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:52,554 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15c2737e1dec4f8b8968c77df10e3460, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1731439969855 2024-11-12T19:32:52,554 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/25929b3e769148cea2d5430b1888263e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/631b9f84ab7d44349993393747dab717, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/7fb5610d3c10482d818eb5a728a315ae, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/3a33e9f7ebdb402bb894fb8d8f9e9ccd] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=47.0 K 2024-11-12T19:32:52,555 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6337f5a5d264843b04ead124a9e055c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1731439971245 2024-11-12T19:32:52,560 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 25929b3e769148cea2d5430b1888263e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731439967364 2024-11-12T19:32:52,562 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 631b9f84ab7d44349993393747dab717, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1731439968640 2024-11-12T19:32:52,564 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 7fb5610d3c10482d818eb5a728a315ae, keycount=150, bloomtype=ROW, size=11.7 
K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1731439969855 2024-11-12T19:32:52,570 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a33e9f7ebdb402bb894fb8d8f9e9ccd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1731439971245 2024-11-12T19:32:52,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:32:52,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-12T19:32:52,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A 2024-11-12T19:32:52,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:52,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B 2024-11-12T19:32:52,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:52,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C 2024-11-12T19:32:52,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:52,612 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/89486f468dac43c99f000fc82b8413a6 is 50, key is test_row_0/A:col10/1731439972593/Put/seqid=0 2024-11-12T19:32:52,619 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#A#compaction#22 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:32:52,620 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/c19a62eb5e784775b1a5e17670aab142 is 50, key is test_row_0/A:col10/1731439971265/Put/seqid=0 2024-11-12T19:32:52,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-12T19:32:52,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:52,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:52,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440032627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:52,651 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#B#compaction#23 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:32:52,652 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/bc42a5286c164fa0961edddb987694b7 is 50, key is test_row_0/B:col10/1731439971265/Put/seqid=0 2024-11-12T19:32:52,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440032629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:52,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:52,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440032644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:52,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741860_1036 (size=14391) 2024-11-12T19:32:52,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:52,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440032648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:52,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:52,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440032650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:52,675 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/89486f468dac43c99f000fc82b8413a6 2024-11-12T19:32:52,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741861_1037 (size=12241) 2024-11-12T19:32:52,704 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:52,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-12T19:32:52,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:52,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:52,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:52,707 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:52,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:52,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:52,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/0d1e3835791b4dd9ab4349f5563f7e5e is 50, key is test_row_0/B:col10/1731439972593/Put/seqid=0 2024-11-12T19:32:52,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741862_1038 (size=12241) 2024-11-12T19:32:52,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:52,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440032764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:52,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:52,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440032767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:52,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:52,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440032774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:52,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:52,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440032777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:52,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:52,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440032777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:52,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741863_1039 (size=12051) 2024-11-12T19:32:52,815 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/bc42a5286c164fa0961edddb987694b7 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/bc42a5286c164fa0961edddb987694b7 2024-11-12T19:32:52,845 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/B of 76d7848c1ddd620b84cb604cad3a693a into bc42a5286c164fa0961edddb987694b7(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:32:52,845 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:52,845 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/B, priority=12, startTime=1731439972545; duration=0sec 2024-11-12T19:32:52,845 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:32:52,845 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:B 2024-11-12T19:32:52,846 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:32:52,905 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:52,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-12T19:32:52,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:52,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:52,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:52,907 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:32:52,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:52,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:52,925 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:32:52,925 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/C is initiating minor compaction (all files) 2024-11-12T19:32:52,925 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/C in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:52,925 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/60f74d7ecd224644a1587adc44c0520d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/4c8946d6fdf142cfbd77e197992fb179, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/6f8954daf67e4df494af9f5eefe7ec43, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/b1ceace1e43f4fdbafca2fe8e2c2abb3] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=47.0 K 2024-11-12T19:32:52,932 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 60f74d7ecd224644a1587adc44c0520d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731439967364 2024-11-12T19:32:52,941 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c8946d6fdf142cfbd77e197992fb179, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1731439968640 2024-11-12T19:32:52,943 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f8954daf67e4df494af9f5eefe7ec43, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1731439969855 2024-11-12T19:32:52,947 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting b1ceace1e43f4fdbafca2fe8e2c2abb3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1731439971245 2024-11-12T19:32:52,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:52,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440032977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:52,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:52,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440032982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:52,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:52,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440032991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:53,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:53,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440032998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:53,005 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#C#compaction#25 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:32:53,006 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/95737f6da8df4c0db162e3ea7b005bca is 50, key is test_row_0/C:col10/1731439971265/Put/seqid=0 2024-11-12T19:32:53,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:53,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440032998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:53,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741864_1040 (size=12241) 2024-11-12T19:32:53,067 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:53,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-12T19:32:53,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:53,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:53,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:53,072 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
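
The mutations being rejected above are client puts against the A/B/C families of TestAcidGuarantees, turned away with RegionTooBusyException while the region's memstore is over its blocking limit. The stock HBase client normally retries this exception internally before giving up, but if it does surface to application code it can be handled explicitly. A minimal, hypothetical sketch follows; the table, row, family and qualifier names are taken from the log, the value and retry policy are invented for illustration, and none of this is code from the test itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);            // server may reject with RegionTooBusyException
              break;                     // write accepted
            } catch (RegionTooBusyException e) {
              if (attempt == 5) throw e; // give up after a few attempts
              Thread.sleep(backoffMs);   // back off while the region flushes
              backoffMs *= 2;
            }
          }
        }
      }
    }

Backing off is the right response here: the exception is transient and clears once the flush that the log shows in progress has drained the memstore.
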
2024-11-12T19:32:53,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:53,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:53,085 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/95737f6da8df4c0db162e3ea7b005bca as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/95737f6da8df4c0db162e3ea7b005bca 2024-11-12T19:32:53,137 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/c19a62eb5e784775b1a5e17670aab142 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/c19a62eb5e784775b1a5e17670aab142 2024-11-12T19:32:53,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-12T19:32:53,154 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/C of 76d7848c1ddd620b84cb604cad3a693a into 95737f6da8df4c0db162e3ea7b005bca(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:32:53,154 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:53,154 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/C, priority=12, startTime=1731439972546; duration=0sec 2024-11-12T19:32:53,154 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:32:53,154 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:C 2024-11-12T19:32:53,179 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/A of 76d7848c1ddd620b84cb604cad3a693a into c19a62eb5e784775b1a5e17670aab142(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
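
The compactions completing above ran under the pressure-aware compaction throttle; the "total limit is 50.00 MB/second" reported by PressureAwareThroughputController is that throttle's current bound. A hypothetical tuning sketch using the standard property names is shown below; the byte values are illustrative and are not taken from this test's configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputConfig {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Bounds (bytes/second) for the pressure-aware compaction throttle.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        return conf;
      }
    }
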
2024-11-12T19:32:53,179 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:53,179 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/A, priority=12, startTime=1731439972545; duration=0sec 2024-11-12T19:32:53,179 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:32:53,179 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:A 2024-11-12T19:32:53,212 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/0d1e3835791b4dd9ab4349f5563f7e5e 2024-11-12T19:32:53,231 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:53,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-12T19:32:53,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:53,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:53,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:53,240 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
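
The repeated "Over memstore limit=512.0 K" rejections come from HRegion.checkResources, which blocks further updates once a region's memstore passes its blocking threshold: the configured flush size multiplied by the block multiplier. A 512 K threshold is consistent with a deliberately small flush size in this test, though the test's actual settings are not visible in this excerpt. A hypothetical sketch of how that threshold is derived (the concrete values are assumptions for illustration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 KB flush trigger
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block at 4x flush size

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("updates blocked above " + blockingLimit + " bytes"); // 524288 = 512 K
      }
    }
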
2024-11-12T19:32:53,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:53,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:53,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:53,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440033286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:53,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/25a9769b81894f22b1791a6742ca2736 is 50, key is test_row_0/C:col10/1731439972593/Put/seqid=0 2024-11-12T19:32:53,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:53,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440033302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:53,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:53,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440033301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:53,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:53,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440033311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:53,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:53,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440033315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:53,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741865_1041 (size=12051) 2024-11-12T19:32:53,334 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/25a9769b81894f22b1791a6742ca2736 2024-11-12T19:32:53,397 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:53,398 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-12T19:32:53,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:53,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:53,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:53,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:53,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:53,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:53,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/89486f468dac43c99f000fc82b8413a6 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/89486f468dac43c99f000fc82b8413a6 2024-11-12T19:32:53,483 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/89486f468dac43c99f000fc82b8413a6, entries=200, sequenceid=129, filesize=14.1 K 2024-11-12T19:32:53,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/0d1e3835791b4dd9ab4349f5563f7e5e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/0d1e3835791b4dd9ab4349f5563f7e5e 2024-11-12T19:32:53,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/0d1e3835791b4dd9ab4349f5563f7e5e, entries=150, sequenceid=129, filesize=11.8 K 2024-11-12T19:32:53,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/25a9769b81894f22b1791a6742ca2736 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/25a9769b81894f22b1791a6742ca2736 2024-11-12T19:32:53,554 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:53,555 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-12T19:32:53,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
2024-11-12T19:32:53,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:53,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:53,555 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:53,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:32:53,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
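
The pid=17 procedure keeps failing because the master re-dispatches FlushRegionCallable while the region's MemStoreFlusher is still mid-flush; each attempt is turned away with "NOT flushing ... as already flushing" until the in-flight flush completes (see the "Finished flush" entry just below). An explicit flush request is the kind of client call that maps onto such a master-driven procedure in recent releases; the minimal, hypothetical example below shows one, though nothing in this log shows what actually triggered pid=17.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ExplicitFlushExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks for every region of the table to be flushed; the servers
          // carry this out and report success or failure back, as seen above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
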
2024-11-12T19:32:53,586 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/25a9769b81894f22b1791a6742ca2736, entries=150, sequenceid=129, filesize=11.8 K 2024-11-12T19:32:53,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 76d7848c1ddd620b84cb604cad3a693a in 989ms, sequenceid=129, compaction requested=false 2024-11-12T19:32:53,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:53,711 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:53,715 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-12T19:32:53,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:53,717 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-12T19:32:53,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A 2024-11-12T19:32:53,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:53,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B 2024-11-12T19:32:53,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:53,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C 2024-11-12T19:32:53,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:53,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/fa0fbe223b8c4e7d82e8b580f2dc30c9 is 50, key is test_row_0/A:col10/1731439972639/Put/seqid=0 2024-11-12T19:32:53,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:32:53,804 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:53,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741866_1042 (size=12151) 2024-11-12T19:32:53,816 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=143 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/fa0fbe223b8c4e7d82e8b580f2dc30c9 2024-11-12T19:32:53,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/f1c8c8c1b11d4282b99b59d3afc50ec1 is 50, key is test_row_0/B:col10/1731439972639/Put/seqid=0 2024-11-12T19:32:53,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741867_1043 (size=12151) 2024-11-12T19:32:53,879 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=143 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/f1c8c8c1b11d4282b99b59d3afc50ec1 2024-11-12T19:32:53,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:53,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/50f57c4a963640be9972623c1d49aa21 is 50, key is test_row_0/C:col10/1731439972639/Put/seqid=0 2024-11-12T19:32:53,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440033906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:53,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:53,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440033906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:53,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:53,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:53,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440033913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:53,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440033914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:53,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:53,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440033922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:53,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741868_1044 (size=12151) 2024-11-12T19:32:53,961 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=143 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/50f57c4a963640be9972623c1d49aa21 2024-11-12T19:32:53,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/fa0fbe223b8c4e7d82e8b580f2dc30c9 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/fa0fbe223b8c4e7d82e8b580f2dc30c9 2024-11-12T19:32:54,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440034016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440034027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440034029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440034030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,033 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/fa0fbe223b8c4e7d82e8b580f2dc30c9, entries=150, sequenceid=143, filesize=11.9 K 2024-11-12T19:32:54,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440034032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/f1c8c8c1b11d4282b99b59d3afc50ec1 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/f1c8c8c1b11d4282b99b59d3afc50ec1 2024-11-12T19:32:54,062 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/f1c8c8c1b11d4282b99b59d3afc50ec1, entries=150, sequenceid=143, filesize=11.9 K 2024-11-12T19:32:54,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/50f57c4a963640be9972623c1d49aa21 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/50f57c4a963640be9972623c1d49aa21 2024-11-12T19:32:54,086 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/50f57c4a963640be9972623c1d49aa21, entries=150, sequenceid=143, filesize=11.9 K 2024-11-12T19:32:54,093 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 76d7848c1ddd620b84cb604cad3a693a in 376ms, sequenceid=143, compaction requested=true 2024-11-12T19:32:54,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:54,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing 
region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:54,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-12T19:32:54,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-12T19:32:54,106 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-12T19:32:54,106 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0630 sec 2024-11-12T19:32:54,116 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 2.0830 sec 2024-11-12T19:32:54,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-12T19:32:54,161 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-12T19:32:54,168 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:32:54,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-12T19:32:54,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-12T19:32:54,175 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:32:54,180 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:32:54,180 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:32:54,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:32:54,230 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-12T19:32:54,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A 2024-11-12T19:32:54,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:54,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B 2024-11-12T19:32:54,230 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:54,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C 2024-11-12T19:32:54,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:54,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/7a36bfa6161548de99fb4e92dd5c7f88 is 50, key is test_row_0/A:col10/1731439974224/Put/seqid=0 2024-11-12T19:32:54,259 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440034253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,260 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440034255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440034258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440034262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440034263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-12T19:32:54,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741869_1045 (size=12151) 2024-11-12T19:32:54,301 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/7a36bfa6161548de99fb4e92dd5c7f88 2024-11-12T19:32:54,323 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/6239bbc0e75c4ee1806ebd169162cf75 is 50, key is test_row_0/B:col10/1731439974224/Put/seqid=0 2024-11-12T19:32:54,339 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:54,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-12T19:32:54,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:54,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:54,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
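The repeated RegionTooBusyException rejections above are the region server blocking writes while the memstore sits over its blocking limit (the "512.0 K" figure corresponds to hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier; the exact values this test lowers them to are not shown in this excerpt). Below is a minimal, hedged client-side sketch of how a writer could absorb these rejections with a backoff-and-retry loop. The table name, row key, and "A" family are taken from the log; the retry loop, attempt count, and backoff values are illustrative assumptions, not how the test harness actually handles it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100L; // illustrative starting backoff, not taken from the test
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);   // the server may reject this with RegionTooBusyException
          break;            // write accepted
        } catch (RegionTooBusyException e) {
          // The region is blocking updates until the in-flight flush completes;
          // back off and retry instead of failing the writer outright.
          // (Depending on hbase.client.retries.number the client may already have
          // retried internally before this exception reaches application code.)
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}

Once the flush visible in the surrounding log entries finishes and the memstore drops back under the blocking limit, a retried put of this kind would go through; the log continues with the server-side view of that flush.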
2024-11-12T19:32:54,347 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:54,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:54,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:54,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741870_1046 (size=12151) 2024-11-12T19:32:54,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440034367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440034367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440034368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440034374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440034374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-12T19:32:54,510 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:54,511 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-12T19:32:54,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:54,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:54,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:54,512 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:54,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:54,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:54,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440034580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440034581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440034583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440034585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,587 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440034584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,676 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:54,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-12T19:32:54,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:54,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:54,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:54,679 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:54,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:54,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:54,776 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/6239bbc0e75c4ee1806ebd169162cf75 2024-11-12T19:32:54,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-12T19:32:54,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/14ccf3a4965f471e8d438be24b0bd615 is 50, key is test_row_0/C:col10/1731439974224/Put/seqid=0 2024-11-12T19:32:54,842 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:54,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741871_1047 (size=12151) 2024-11-12T19:32:54,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-12T19:32:54,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:54,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
as already flushing 2024-11-12T19:32:54,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:54,849 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:54,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
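The repeated pid=19 failures above come from the remote flush procedure finding the region already mid-flush: FlushRegionCallable logs "NOT flushing ... as already flushing", wraps that as an IOException, and the master re-dispatches the procedure until a flush can actually run. A flush of this kind can also be requested programmatically through the Admin API; the following is a minimal sketch, assuming a reachable cluster whose client configuration is on the classpath. Only the table name is taken from the log; everything else is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        // Picks up hbase-site.xml from the classpath (assumed, not shown in the log).
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the region server(s) to flush the table's memstores. If a region
          // is already flushing, the server side reports exactly the kind of
          // "NOT flushing ... as already flushing" condition seen above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }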
2024-11-12T19:32:54,850 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/14ccf3a4965f471e8d438be24b0bd615 2024-11-12T19:32:54,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
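The "Over memstore limit=512.0 K" rejections interleaved through this stretch are HRegion.checkResources blocking writes because the region's memstore has grown past its blocking threshold, which is the per-region flush size multiplied by the block multiplier. The exact settings used by this test are not visible in the log; the sketch below only illustrates how a 512 K limit could arise, with hypothetical values chosen so that flush size × multiplier equals 512 K.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimits {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical values for illustration only; the test's actual configuration
        // is not shown in the log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // per-region flush threshold
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at flush.size * multiplier
        // With these values a region starts rejecting writes with RegionTooBusyException
        // once its memstore exceeds 4 * 128 K = 512 K, matching the
        // "Over memstore limit=512.0 K" messages above.
      }
    }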
2024-11-12T19:32:54,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/7a36bfa6161548de99fb4e92dd5c7f88 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/7a36bfa6161548de99fb4e92dd5c7f88 2024-11-12T19:32:54,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440034886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440034891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440034892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440034898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:54,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440034898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:54,911 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/7a36bfa6161548de99fb4e92dd5c7f88, entries=150, sequenceid=170, filesize=11.9 K 2024-11-12T19:32:54,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/6239bbc0e75c4ee1806ebd169162cf75 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/6239bbc0e75c4ee1806ebd169162cf75 2024-11-12T19:32:54,939 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/6239bbc0e75c4ee1806ebd169162cf75, entries=150, sequenceid=170, filesize=11.9 K 2024-11-12T19:32:54,949 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/14ccf3a4965f471e8d438be24b0bd615 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/14ccf3a4965f471e8d438be24b0bd615 2024-11-12T19:32:54,964 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/14ccf3a4965f471e8d438be24b0bd615, entries=150, sequenceid=170, filesize=11.9 K 2024-11-12T19:32:54,965 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 76d7848c1ddd620b84cb604cad3a693a in 735ms, sequenceid=170, compaction requested=true 2024-11-12T19:32:54,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:54,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:32:54,966 
DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:32:54,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:32:54,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:32:54,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:32:54,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:32:54,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-12T19:32:54,970 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:32:54,980 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50934 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:32:54,980 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/A is initiating minor compaction (all files) 2024-11-12T19:32:54,980 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/A in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
2024-11-12T19:32:54,980 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/c19a62eb5e784775b1a5e17670aab142, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/89486f468dac43c99f000fc82b8413a6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/fa0fbe223b8c4e7d82e8b580f2dc30c9, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/7a36bfa6161548de99fb4e92dd5c7f88] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=49.7 K 2024-11-12T19:32:54,981 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting c19a62eb5e784775b1a5e17670aab142, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1731439971245 2024-11-12T19:32:54,982 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89486f468dac43c99f000fc82b8413a6, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1731439971410 2024-11-12T19:32:54,983 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa0fbe223b8c4e7d82e8b580f2dc30c9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1731439972612 2024-11-12T19:32:54,984 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a36bfa6161548de99fb4e92dd5c7f88, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731439973905 2024-11-12T19:32:54,988 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48594 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:32:54,989 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/B is initiating minor compaction (all files) 2024-11-12T19:32:54,989 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/B in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
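The SortedCompactionPolicy/ExploringCompactionPolicy entries above show each store picking all four eligible HFiles (roughly 50 K in total) for a minor compaction, with 16 store files as the blocking threshold. Those selection decisions are driven by a handful of store-level settings; the values in the sketch below are illustrative assumptions rather than settings read from the log (only the blocking-store-files figure of 16 matches what the log reports).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionKnobs {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative values only (assumptions, not taken from the log):
        conf.setInt("hbase.hstore.compaction.min", 4);        // minimum files before a minor compaction is considered
        conf.setInt("hbase.hstore.compaction.max", 10);       // maximum files selected per compaction
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used by the exploring policy
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // matches the "16 blocking" figure in the log
      }
    }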
2024-11-12T19:32:54,989 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/bc42a5286c164fa0961edddb987694b7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/0d1e3835791b4dd9ab4349f5563f7e5e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/f1c8c8c1b11d4282b99b59d3afc50ec1, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/6239bbc0e75c4ee1806ebd169162cf75] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=47.5 K 2024-11-12T19:32:54,990 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting bc42a5286c164fa0961edddb987694b7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1731439971245 2024-11-12T19:32:54,996 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d1e3835791b4dd9ab4349f5563f7e5e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1731439971415 2024-11-12T19:32:54,997 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting f1c8c8c1b11d4282b99b59d3afc50ec1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1731439972612 2024-11-12T19:32:54,998 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 6239bbc0e75c4ee1806ebd169162cf75, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731439973905 2024-11-12T19:32:55,007 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:55,007 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-12T19:32:55,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
2024-11-12T19:32:55,008 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-12T19:32:55,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A 2024-11-12T19:32:55,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:55,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B 2024-11-12T19:32:55,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:55,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C 2024-11-12T19:32:55,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:55,019 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#A#compaction#33 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:32:55,019 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/f455d468bac24c55b013be600e0f3311 is 50, key is test_row_0/A:col10/1731439974224/Put/seqid=0 2024-11-12T19:32:55,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/5f664198be454cf0beac10e73e45b436 is 50, key is test_row_1/A:col10/1731439974253/Put/seqid=0 2024-11-12T19:32:55,078 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#B#compaction#35 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:32:55,082 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/46b42fa553ed4610a728ba53bf2900ba is 50, key is test_row_0/B:col10/1731439974224/Put/seqid=0 2024-11-12T19:32:55,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741873_1049 (size=9757) 2024-11-12T19:32:55,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741872_1048 (size=12527) 2024-11-12T19:32:55,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741874_1050 (size=12527) 2024-11-12T19:32:55,202 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/46b42fa553ed4610a728ba53bf2900ba as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/46b42fa553ed4610a728ba53bf2900ba 2024-11-12T19:32:55,231 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/B of 76d7848c1ddd620b84cb604cad3a693a into 46b42fa553ed4610a728ba53bf2900ba(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
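The CompactingMemStore "FLUSHING TO DISK ... store=A/B/C" and CompactionPipeline "Swapping pipeline suffix" entries above indicate the column families use an in-memory compacting memstore, whose pipeline segments are swapped out when a disk flush begins. In HBase 2.x that behaviour is chosen per column family through the in-memory compaction policy; a hedged sketch of setting it is shown below. The BASIC policy value and the act of modifying the family here are illustrative assumptions, not something the log confirms.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Column family "A" as in the log; BASIC is an illustrative policy choice.
          ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("A"))
              .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
              .build();
          // Apply the changed descriptor to the existing family only.
          admin.modifyColumnFamily(TableName.valueOf("TestAcidGuarantees"), cf);
        }
      }
    }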
2024-11-12T19:32:55,231 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:55,231 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/B, priority=12, startTime=1731439974966; duration=0sec 2024-11-12T19:32:55,231 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:32:55,259 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:B 2024-11-12T19:32:55,259 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:32:55,271 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48594 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:32:55,271 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/C is initiating minor compaction (all files) 2024-11-12T19:32:55,271 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/C in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:55,272 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/95737f6da8df4c0db162e3ea7b005bca, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/25a9769b81894f22b1791a6742ca2736, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/50f57c4a963640be9972623c1d49aa21, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/14ccf3a4965f471e8d438be24b0bd615] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=47.5 K 2024-11-12T19:32:55,276 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 95737f6da8df4c0db162e3ea7b005bca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1731439971245 2024-11-12T19:32:55,280 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 25a9769b81894f22b1791a6742ca2736, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1731439971415 2024-11-12T19:32:55,285 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 50f57c4a963640be9972623c1d49aa21, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=143, earliestPutTs=1731439972612 2024-11-12T19:32:55,288 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 14ccf3a4965f471e8d438be24b0bd615, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731439973905 2024-11-12T19:32:55,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-12T19:32:55,367 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#C#compaction#36 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:32:55,370 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/24287f2f376848dbb8bd7870d2754f91 is 50, key is test_row_0/C:col10/1731439974224/Put/seqid=0 2024-11-12T19:32:55,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741875_1051 (size=12527) 2024-11-12T19:32:55,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:32:55,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:55,430 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/24287f2f376848dbb8bd7870d2754f91 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/24287f2f376848dbb8bd7870d2754f91 2024-11-12T19:32:55,459 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/C of 76d7848c1ddd620b84cb604cad3a693a into 24287f2f376848dbb8bd7870d2754f91(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
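The PressureAwareThroughputController entries above report each compaction's average throughput against a total limit of 50.00 MB/second, i.e. compaction I/O is being throttled. As far as I can tell this limit comes from the pressure-aware compaction throughput bounds; the property names and byte values in the sketch below should be treated as assumptions rather than settings the log confirms.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputLimits {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed property names for the pressure-aware compaction throughput
        // controller; the values are illustrative only.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // ~50 MB/s
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // ~100 MB/s
      }
    }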
2024-11-12T19:32:55,459 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:55,459 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/C, priority=12, startTime=1731439974966; duration=0sec 2024-11-12T19:32:55,459 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:32:55,459 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:C 2024-11-12T19:32:55,490 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/5f664198be454cf0beac10e73e45b436 2024-11-12T19:32:55,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/b9ec00f53c2b4b898bb34ba3e0058083 is 50, key is test_row_1/B:col10/1731439974253/Put/seqid=0 2024-11-12T19:32:55,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:55,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440035510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:55,530 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/f455d468bac24c55b013be600e0f3311 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/f455d468bac24c55b013be600e0f3311 2024-11-12T19:32:55,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:55,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440035512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:55,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:55,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440035512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:55,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:55,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440035519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:55,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:55,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440035523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:55,555 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/A of 76d7848c1ddd620b84cb604cad3a693a into f455d468bac24c55b013be600e0f3311(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
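Throughout this window the server keeps answering client Mutate calls with RegionTooBusyException (the "callId ... deadline ..." entries), and the standard HBase client treats that exception as retryable and backs off internally. The sketch below merely makes that behaviour explicit with a manual retry loop; it assumes the exception surfaces to the caller (for example with client retries turned down). The row key and column A:col10 are copied from the log, while the cell value and backoff numbers are made up.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetry {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);
              break; // write accepted
            } catch (RegionTooBusyException busy) {
              // The region's memstore is over its blocking limit; wait for the
              // flush/compaction to catch up, then try again with a longer pause.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }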
2024-11-12T19:32:55,555 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:55,555 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/A, priority=12, startTime=1731439974966; duration=0sec 2024-11-12T19:32:55,555 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:32:55,556 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:A 2024-11-12T19:32:55,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741876_1052 (size=9757) 2024-11-12T19:32:55,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:55,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440035638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:55,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:55,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440035638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:55,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:55,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440035639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:55,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:55,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440035648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:55,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:55,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440035649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:55,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:55,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440035849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:55,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:55,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440035849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:55,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:55,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440035849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:55,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:55,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440035861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:55,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:55,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440035869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:55,974 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/b9ec00f53c2b4b898bb34ba3e0058083 2024-11-12T19:32:56,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/174d8a16a436415fb1a6b2251a1caf67 is 50, key is test_row_1/C:col10/1731439974253/Put/seqid=0 2024-11-12T19:32:56,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741877_1053 (size=9757) 2024-11-12T19:32:56,089 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/174d8a16a436415fb1a6b2251a1caf67 2024-11-12T19:32:56,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/5f664198be454cf0beac10e73e45b436 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/5f664198be454cf0beac10e73e45b436 2024-11-12T19:32:56,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:56,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440036161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:56,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:56,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440036163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:56,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:56,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440036163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:56,175 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/5f664198be454cf0beac10e73e45b436, entries=100, sequenceid=179, filesize=9.5 K 2024-11-12T19:32:56,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/b9ec00f53c2b4b898bb34ba3e0058083 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/b9ec00f53c2b4b898bb34ba3e0058083 2024-11-12T19:32:56,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:56,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440036181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:56,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:56,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440036186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:56,204 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/b9ec00f53c2b4b898bb34ba3e0058083, entries=100, sequenceid=179, filesize=9.5 K 2024-11-12T19:32:56,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/174d8a16a436415fb1a6b2251a1caf67 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/174d8a16a436415fb1a6b2251a1caf67 2024-11-12T19:32:56,247 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/174d8a16a436415fb1a6b2251a1caf67, entries=100, sequenceid=179, filesize=9.5 K 2024-11-12T19:32:56,249 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for 76d7848c1ddd620b84cb604cad3a693a in 1240ms, sequenceid=179, compaction requested=false 2024-11-12T19:32:56,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:56,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
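The "Over memstore limit=512.0 K" figure in these rejections is normally the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the values this test actually configures are not visible in the log, so the numbers in the sketch below are only an assumption chosen to reproduce a 512 K blocking limit for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBackpressureConfig {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative values only: a 128 KB flush size with a block multiplier of 4
        // yields the 512 K blocking limit reported in the log
        // (blocking limit = flush.size * block.multiplier).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("blocking limit bytes = " + blockingLimit);
    }
}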
2024-11-12T19:32:56,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-12T19:32:56,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-12T19:32:56,262 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-12T19:32:56,262 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0790 sec 2024-11-12T19:32:56,271 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 2.0950 sec 2024-11-12T19:32:56,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-12T19:32:56,296 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-12T19:32:56,312 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:32:56,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-12T19:32:56,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-12T19:32:56,323 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:32:56,327 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:32:56,327 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:32:56,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-12T19:32:56,510 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:56,513 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-12T19:32:56,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
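The flush procedures tracked above (pid=18 completing, pid=20/21 starting) are driven by the client asking the master to flush the table. A minimal sketch of issuing such a flush through the public Admin API (connection setup assumed, not taken from the test code itself) looks like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to run a flush of the table, which fans out
            // per-region flush work to the hosting region servers.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}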
2024-11-12T19:32:56,513 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-12T19:32:56,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A 2024-11-12T19:32:56,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:56,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B 2024-11-12T19:32:56,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:56,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C 2024-11-12T19:32:56,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:56,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/a32e65acab3c4074a13e2f9c26f896ac is 50, key is test_row_0/A:col10/1731439975514/Put/seqid=0 2024-11-12T19:32:56,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741878_1054 (size=12151) 2024-11-12T19:32:56,599 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/a32e65acab3c4074a13e2f9c26f896ac 2024-11-12T19:32:56,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-12T19:32:56,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/69a2881f5f9445e78e4ef3433f84eac7 is 50, key is test_row_0/B:col10/1731439975514/Put/seqid=0 2024-11-12T19:32:56,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
as already flushing 2024-11-12T19:32:56,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:32:56,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741879_1055 (size=12151) 2024-11-12T19:32:56,696 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/69a2881f5f9445e78e4ef3433f84eac7 2024-11-12T19:32:56,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:56,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440036698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:56,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:56,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440036704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:56,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:56,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440036706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:56,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:56,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440036707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:56,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:56,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440036715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:56,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/04e95edc86d9448a8f214c79f9d606de is 50, key is test_row_0/C:col10/1731439975514/Put/seqid=0 2024-11-12T19:32:56,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741880_1056 (size=12151) 2024-11-12T19:32:56,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:56,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440036820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:56,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:56,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440036827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:56,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:56,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440036831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:56,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-12T19:32:57,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:57,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440037026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:57,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:57,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440037032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:57,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:57,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440037044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:57,211 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/04e95edc86d9448a8f214c79f9d606de 2024-11-12T19:32:57,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/a32e65acab3c4074a13e2f9c26f896ac as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/a32e65acab3c4074a13e2f9c26f896ac 2024-11-12T19:32:57,283 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/a32e65acab3c4074a13e2f9c26f896ac, entries=150, sequenceid=210, filesize=11.9 K 2024-11-12T19:32:57,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/69a2881f5f9445e78e4ef3433f84eac7 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/69a2881f5f9445e78e4ef3433f84eac7 2024-11-12T19:32:57,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:57,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440037331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:57,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:57,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440037341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:57,351 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/69a2881f5f9445e78e4ef3433f84eac7, entries=150, sequenceid=210, filesize=11.9 K 2024-11-12T19:32:57,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:57,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440037351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:57,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/04e95edc86d9448a8f214c79f9d606de as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/04e95edc86d9448a8f214c79f9d606de 2024-11-12T19:32:57,427 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/04e95edc86d9448a8f214c79f9d606de, entries=150, sequenceid=210, filesize=11.9 K 2024-11-12T19:32:57,430 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=46.96 KB/48090 for 76d7848c1ddd620b84cb604cad3a693a in 917ms, sequenceid=210, compaction requested=true 2024-11-12T19:32:57,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:57,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
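The writes rejected above with RegionTooBusyException are normally retried internally by the HBase client; the sketch below makes that retry-with-backoff pattern explicit purely for illustration. It is a minimal, assumed example: the table name TestAcidGuarantees, family A and the row/column test_row_0/col10 are taken from the log, while the backoff values, attempt count and cell value are invented.

// Hypothetical client-side writer; not part of the test code in this log.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                 // assumed starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);                   // server side may reject this while over the memstore limit
          return;                           // write accepted
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);          // give the in-flight flush time to drain the memstore
          backoffMs *= 2;
        }
      }
      throw new IOException("region stayed too busy after 5 attempts");
    }
  }
}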
2024-11-12T19:32:57,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-12T19:32:57,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-12T19:32:57,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-12T19:32:57,456 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-12T19:32:57,456 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1170 sec 2024-11-12T19:32:57,459 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.1450 sec 2024-11-12T19:32:57,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:32:57,745 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-12T19:32:57,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A 2024-11-12T19:32:57,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:57,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B 2024-11-12T19:32:57,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:57,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C 2024-11-12T19:32:57,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:57,765 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/93730cc0ad84445ebb2971ad87eaeb74 is 50, key is test_row_0/A:col10/1731439977735/Put/seqid=0 2024-11-12T19:32:57,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741881_1057 (size=14537) 2024-11-12T19:32:57,925 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:57,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440037912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:57,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:57,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440037915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:57,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:57,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440037924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:57,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:57,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440037924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:57,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:57,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440037926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:58,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:58,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440038028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:58,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:58,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440038029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:58,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:58,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440038039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:58,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:58,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440038039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:58,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:58,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440038047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:58,236 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=222 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/93730cc0ad84445ebb2971ad87eaeb74 2024-11-12T19:32:58,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:58,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440038232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:58,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:58,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440038243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:58,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:58,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440038247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:58,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:58,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440038248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:58,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:58,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440038263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:58,296 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/9ba3220e3d7f4a57a8f3724cc1f74e1d is 50, key is test_row_0/B:col10/1731439977735/Put/seqid=0 2024-11-12T19:32:58,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741882_1058 (size=9757) 2024-11-12T19:32:58,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-12T19:32:58,443 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-12T19:32:58,463 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:32:58,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-12T19:32:58,469 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:32:58,470 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:32:58,471 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:32:58,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-12T19:32:58,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:58,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440038549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:58,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:58,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440038549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:58,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:58,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440038555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:58,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:58,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440038558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:58,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-12T19:32:58,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:58,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440038580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:58,637 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:58,638 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-12T19:32:58,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:58,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:58,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:58,638 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
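For reference, the FLUSH table operation and the FlushTableProcedure/FlushRegionProcedure pair driving this part of the log correspond to a client-side Admin.flush call against the table. The sketch below is a hedged illustration: only the table name comes from the log; the connection boilerplate is assumed.

// Assumed client-side trigger for the flush procedures seen above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequester {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush all regions of the table; the master schedules a
      // FlushTableProcedure and the client polls for completion, as in the
      // "Checking to see if procedure is done pid=..." lines above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}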
2024-11-12T19:32:58,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:58,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:58,748 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=222 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/9ba3220e3d7f4a57a8f3724cc1f74e1d 2024-11-12T19:32:58,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-12T19:32:58,793 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/7b9021c4a51f40839a73522c9c358da4 is 50, key is test_row_0/C:col10/1731439977735/Put/seqid=0 2024-11-12T19:32:58,795 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:58,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-12T19:32:58,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:58,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:58,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:58,800 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:32:58,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:58,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:58,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741883_1059 (size=9757) 2024-11-12T19:32:58,963 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:58,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-12T19:32:58,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:58,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:58,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:58,967 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:58,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:58,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:59,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:59,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440039059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:59,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:59,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440039060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:59,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:59,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440039063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:59,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:59,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440039069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:59,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-12T19:32:59,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:32:59,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440039095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:32:59,136 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:59,138 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-12T19:32:59,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:59,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:59,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:59,139 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:32:59,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:59,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:59,259 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=222 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/7b9021c4a51f40839a73522c9c358da4 2024-11-12T19:32:59,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/93730cc0ad84445ebb2971ad87eaeb74 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/93730cc0ad84445ebb2971ad87eaeb74 2024-11-12T19:32:59,307 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/93730cc0ad84445ebb2971ad87eaeb74, entries=200, sequenceid=222, filesize=14.2 K 2024-11-12T19:32:59,311 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:59,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/9ba3220e3d7f4a57a8f3724cc1f74e1d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/9ba3220e3d7f4a57a8f3724cc1f74e1d 2024-11-12T19:32:59,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-12T19:32:59,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:59,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:32:59,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:59,316 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:59,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:59,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:32:59,327 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/9ba3220e3d7f4a57a8f3724cc1f74e1d, entries=100, sequenceid=222, filesize=9.5 K 2024-11-12T19:32:59,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/7b9021c4a51f40839a73522c9c358da4 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/7b9021c4a51f40839a73522c9c358da4 2024-11-12T19:32:59,360 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/7b9021c4a51f40839a73522c9c358da4, entries=100, sequenceid=222, filesize=9.5 K 2024-11-12T19:32:59,367 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 76d7848c1ddd620b84cb604cad3a693a in 1621ms, sequenceid=222, compaction requested=true 2024-11-12T19:32:59,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:59,367 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:32:59,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:A, priority=-2147483648, current under compaction store size 
is 1 2024-11-12T19:32:59,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:32:59,368 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:32:59,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:32:59,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:32:59,369 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:32:59,369 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:32:59,370 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48972 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:32:59,370 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/A is initiating minor compaction (all files) 2024-11-12T19:32:59,370 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/A in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
2024-11-12T19:32:59,370 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/f455d468bac24c55b013be600e0f3311, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/5f664198be454cf0beac10e73e45b436, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/a32e65acab3c4074a13e2f9c26f896ac, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/93730cc0ad84445ebb2971ad87eaeb74] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=47.8 K 2024-11-12T19:32:59,371 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting f455d468bac24c55b013be600e0f3311, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731439973905 2024-11-12T19:32:59,375 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f664198be454cf0beac10e73e45b436, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1731439974253 2024-11-12T19:32:59,376 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 44192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:32:59,376 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/B is initiating minor compaction (all files) 2024-11-12T19:32:59,376 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/B in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
2024-11-12T19:32:59,377 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/46b42fa553ed4610a728ba53bf2900ba, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/b9ec00f53c2b4b898bb34ba3e0058083, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/69a2881f5f9445e78e4ef3433f84eac7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/9ba3220e3d7f4a57a8f3724cc1f74e1d] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=43.2 K 2024-11-12T19:32:59,377 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting a32e65acab3c4074a13e2f9c26f896ac, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1731439975493 2024-11-12T19:32:59,379 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93730cc0ad84445ebb2971ad87eaeb74, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1731439976683 2024-11-12T19:32:59,380 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 46b42fa553ed4610a728ba53bf2900ba, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731439973905 2024-11-12T19:32:59,382 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting b9ec00f53c2b4b898bb34ba3e0058083, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1731439974253 2024-11-12T19:32:59,386 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 69a2881f5f9445e78e4ef3433f84eac7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1731439975493 2024-11-12T19:32:59,388 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ba3220e3d7f4a57a8f3724cc1f74e1d, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1731439977732 2024-11-12T19:32:59,427 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#B#compaction#45 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:32:59,428 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/8de19df865084f74b25cb1a5058adf11 is 50, key is test_row_0/B:col10/1731439977735/Put/seqid=0 2024-11-12T19:32:59,457 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#A#compaction#46 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:32:59,458 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/e73405f3289d43bb9230928cccbf2287 is 50, key is test_row_0/A:col10/1731439977735/Put/seqid=0 2024-11-12T19:32:59,487 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:32:59,488 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-12T19:32:59,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:59,489 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-12T19:32:59,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A 2024-11-12T19:32:59,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:59,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B 2024-11-12T19:32:59,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:59,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C 2024-11-12T19:32:59,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:32:59,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741884_1060 (size=12663) 2024-11-12T19:32:59,563 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741885_1061 (size=12663) 2024-11-12T19:32:59,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/182e0a1b020a4a1e9dbfa79a22c154d0 is 50, key is test_row_0/A:col10/1731439977909/Put/seqid=0 2024-11-12T19:32:59,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-12T19:32:59,584 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/8de19df865084f74b25cb1a5058adf11 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/8de19df865084f74b25cb1a5058adf11 2024-11-12T19:32:59,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741886_1062 (size=12151) 2024-11-12T19:32:59,619 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/B of 76d7848c1ddd620b84cb604cad3a693a into 8de19df865084f74b25cb1a5058adf11(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:32:59,620 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:59,620 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/B, priority=12, startTime=1731439979368; duration=0sec 2024-11-12T19:32:59,620 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:32:59,620 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:B 2024-11-12T19:32:59,620 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:32:59,635 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/e73405f3289d43bb9230928cccbf2287 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/e73405f3289d43bb9230928cccbf2287 2024-11-12T19:32:59,639 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 44192 starting at candidate #0 
after considering 3 permutations with 3 in ratio 2024-11-12T19:32:59,639 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/C is initiating minor compaction (all files) 2024-11-12T19:32:59,639 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/C in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:32:59,640 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/24287f2f376848dbb8bd7870d2754f91, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/174d8a16a436415fb1a6b2251a1caf67, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/04e95edc86d9448a8f214c79f9d606de, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/7b9021c4a51f40839a73522c9c358da4] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=43.2 K 2024-11-12T19:32:59,641 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 24287f2f376848dbb8bd7870d2754f91, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731439973905 2024-11-12T19:32:59,643 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 174d8a16a436415fb1a6b2251a1caf67, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1731439974253 2024-11-12T19:32:59,651 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 04e95edc86d9448a8f214c79f9d606de, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1731439975493 2024-11-12T19:32:59,655 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b9021c4a51f40839a73522c9c358da4, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1731439977732 2024-11-12T19:32:59,692 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/A of 76d7848c1ddd620b84cb604cad3a693a into e73405f3289d43bb9230928cccbf2287(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:32:59,692 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:59,692 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/A, priority=12, startTime=1731439979367; duration=0sec 2024-11-12T19:32:59,692 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:32:59,692 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:A 2024-11-12T19:32:59,701 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#C#compaction#48 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:32:59,702 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/4421682134d6468cba981f4173014b95 is 50, key is test_row_0/C:col10/1731439977735/Put/seqid=0 2024-11-12T19:32:59,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741887_1063 (size=12663) 2024-11-12T19:32:59,792 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/4421682134d6468cba981f4173014b95 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/4421682134d6468cba981f4173014b95 2024-11-12T19:32:59,823 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/C of 76d7848c1ddd620b84cb604cad3a693a into 4421682134d6468cba981f4173014b95(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:32:59,824 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:32:59,824 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/C, priority=12, startTime=1731439979369; duration=0sec 2024-11-12T19:32:59,824 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:32:59,824 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:C 2024-11-12T19:33:00,008 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/182e0a1b020a4a1e9dbfa79a22c154d0 2024-11-12T19:33:00,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/bccef6e4f4534c1c9da83737cfcc56ab is 50, key is test_row_0/B:col10/1731439977909/Put/seqid=0 2024-11-12T19:33:00,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:33:00,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:00,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741888_1064 (size=12151) 2024-11-12T19:33:00,087 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/bccef6e4f4534c1c9da83737cfcc56ab 2024-11-12T19:33:00,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/c1cac6fed98b430982c3029e46835e34 is 50, key is test_row_0/C:col10/1731439977909/Put/seqid=0 2024-11-12T19:33:00,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440040107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440040107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440040111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440040115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440040115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741889_1065 (size=12151) 2024-11-12T19:33:00,158 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/c1cac6fed98b430982c3029e46835e34 2024-11-12T19:33:00,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/182e0a1b020a4a1e9dbfa79a22c154d0 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/182e0a1b020a4a1e9dbfa79a22c154d0 2024-11-12T19:33:00,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440040223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440040223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440040224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440040227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,245 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/182e0a1b020a4a1e9dbfa79a22c154d0, entries=150, sequenceid=246, filesize=11.9 K 2024-11-12T19:33:00,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/bccef6e4f4534c1c9da83737cfcc56ab as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/bccef6e4f4534c1c9da83737cfcc56ab 2024-11-12T19:33:00,280 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/bccef6e4f4534c1c9da83737cfcc56ab, entries=150, sequenceid=246, filesize=11.9 K 2024-11-12T19:33:00,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/c1cac6fed98b430982c3029e46835e34 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/c1cac6fed98b430982c3029e46835e34 2024-11-12T19:33:00,304 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/c1cac6fed98b430982c3029e46835e34, entries=150, sequenceid=246, filesize=11.9 K 2024-11-12T19:33:00,306 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 76d7848c1ddd620b84cb604cad3a693a in 817ms, sequenceid=246, compaction requested=false 2024-11-12T19:33:00,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:33:00,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:00,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-12T19:33:00,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-12T19:33:00,344 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-12T19:33:00,345 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8520 sec 2024-11-12T19:33:00,367 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.8880 sec 2024-11-12T19:33:00,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:33:00,447 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-12T19:33:00,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A 2024-11-12T19:33:00,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:00,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B 2024-11-12T19:33:00,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:00,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C 2024-11-12T19:33:00,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:00,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/1ff506b0d7d8423d8b381f962071495d is 50, key is test_row_0/A:col10/1731439980446/Put/seqid=0 2024-11-12T19:33:00,551 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741890_1066 (size=12301) 2024-11-12T19:33:00,556 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/1ff506b0d7d8423d8b381f962071495d 2024-11-12T19:33:00,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-12T19:33:00,585 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-12T19:33:00,595 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:33:00,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-12T19:33:00,598 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:33:00,600 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:33:00,600 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:33:00,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-12T19:33:00,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440040583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/34ed7949673b4143a0c5bc72c2add754 is 50, key is test_row_0/B:col10/1731439980446/Put/seqid=0 2024-11-12T19:33:00,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440040587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440040599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440040606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741891_1067 (size=12301) 2024-11-12T19:33:00,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-12T19:33:00,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440040707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440040712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440040719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440040720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,755 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:00,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-12T19:33:00,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:00,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:00,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:00,759 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
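[Editor's note] The ipc.CallRunner entries above are the Mutate calls that receive these RegionTooBusyException responses. On the client side each of them is an ordinary Table.put(); the exception is retriable, so the HBase client backs off and retries until its retry budget or operation timeout runs out. The sketch below is an assumed illustration of that client side (row, family, and qualifier names echo the log; the retry settings are arbitrary), not the test's own writer.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative retry settings; the ~40 s deadlines in the log come from whatever the
    // test actually configured, which is not visible in this section.
    conf.setInt("hbase.client.retries.number", 10);
    conf.setLong("hbase.client.pause", 100);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // While the region's memstore is over its blocking limit, each attempt fails with
      // RegionTooBusyException; put() only throws after all retries are exhausted.
      table.put(put);
    }
  }
}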
2024-11-12T19:33:00,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:00,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:00,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-12T19:33:00,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440040910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,922 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:00,925 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-12T19:33:00,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:00,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:00,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
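[Editor's note] The pid=24/25 entries trace a client-requested flush: the master runs a FlushTableProcedure, dispatches a FlushRegionCallable to the region server, and the callable fails with "Unable to complete flush" because the region is still busy with the flush already in progress, so the subprocedure is reported failed and redispatched. The client end of that exchange is just Admin.flush(); a minimal, assumed sketch:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submits a flush for every region of the table; in this build the call returns once
      // the master-side procedure completes (cf. the HBaseAdmin$TableFuture entry for procId 22).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}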
2024-11-12T19:33:00,926 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:00,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:00,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440040919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:00,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440040929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:00,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:00,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440040928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:01,063 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/34ed7949673b4143a0c5bc72c2add754 2024-11-12T19:33:01,090 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:01,091 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-12T19:33:01,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:01,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:01,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:01,092 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:01,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:01,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:01,108 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/ddc22f915e3c40188f60c90aeed9443a is 50, key is test_row_0/C:col10/1731439980446/Put/seqid=0 2024-11-12T19:33:01,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741892_1068 (size=12301) 2024-11-12T19:33:01,170 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/ddc22f915e3c40188f60c90aeed9443a 2024-11-12T19:33:01,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-12T19:33:01,223 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:01,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440041223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:01,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:01,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440041232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:01,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:01,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440041240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:01,244 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:01,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-12T19:33:01,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:01,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440041243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:01,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/1ff506b0d7d8423d8b381f962071495d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/1ff506b0d7d8423d8b381f962071495d 2024-11-12T19:33:01,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:01,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:01,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:01,247 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:01,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:01,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:01,288 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/1ff506b0d7d8423d8b381f962071495d, entries=150, sequenceid=263, filesize=12.0 K 2024-11-12T19:33:01,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/34ed7949673b4143a0c5bc72c2add754 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/34ed7949673b4143a0c5bc72c2add754 2024-11-12T19:33:01,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/34ed7949673b4143a0c5bc72c2add754, entries=150, sequenceid=263, filesize=12.0 K 2024-11-12T19:33:01,402 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:01,403 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-12T19:33:01,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:01,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:01,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:01,403 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:01,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:01,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/ddc22f915e3c40188f60c90aeed9443a as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/ddc22f915e3c40188f60c90aeed9443a 2024-11-12T19:33:01,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:01,424 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/ddc22f915e3c40188f60c90aeed9443a, entries=150, sequenceid=263, filesize=12.0 K 2024-11-12T19:33:01,441 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 76d7848c1ddd620b84cb604cad3a693a in 994ms, sequenceid=263, compaction requested=true 2024-11-12T19:33:01,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:33:01,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:33:01,441 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:01,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:01,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:33:01,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:01,442 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store 
files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:01,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:33:01,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:01,452 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:01,452 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/B is initiating minor compaction (all files) 2024-11-12T19:33:01,452 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/B in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:01,452 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/8de19df865084f74b25cb1a5058adf11, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/bccef6e4f4534c1c9da83737cfcc56ab, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/34ed7949673b4143a0c5bc72c2add754] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=36.2 K 2024-11-12T19:33:01,453 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:01,453 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/A is initiating minor compaction (all files) 2024-11-12T19:33:01,454 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/A in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
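For context on the selection logged just above: "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" means each store has reached the minimum file count for a minor compaction, and 16 is the store-file count at which further flushes would block writers. These thresholds are ordinary HBase configuration; the following is only a sketch of the relevant keys, not configuration taken from this test, with values set to the usual defaults, which match the figures reported in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum/maximum number of store files considered for one minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // ExploringCompactionPolicy ratio: a file joins a selection only if it is no
    // larger than ratio * (sum of the other files in the selection).
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // Store-file count at which writes to the region are blocked until compaction
    // catches up; this is the "16 blocking" figure in the SortedCompactionPolicy lines.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}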
2024-11-12T19:33:01,454 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/e73405f3289d43bb9230928cccbf2287, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/182e0a1b020a4a1e9dbfa79a22c154d0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/1ff506b0d7d8423d8b381f962071495d] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=36.2 K 2024-11-12T19:33:01,454 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 8de19df865084f74b25cb1a5058adf11, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1731439975507 2024-11-12T19:33:01,455 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting e73405f3289d43bb9230928cccbf2287, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1731439975507 2024-11-12T19:33:01,455 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting bccef6e4f4534c1c9da83737cfcc56ab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1731439977909 2024-11-12T19:33:01,456 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 182e0a1b020a4a1e9dbfa79a22c154d0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1731439977909 2024-11-12T19:33:01,456 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 34ed7949673b4143a0c5bc72c2add754, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1731439980112 2024-11-12T19:33:01,457 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ff506b0d7d8423d8b381f962071495d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1731439980112 2024-11-12T19:33:01,498 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#A#compaction#54 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:01,499 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/9be109b41cfa4173b1934dc79d904bfb is 50, key is test_row_0/A:col10/1731439980446/Put/seqid=0 2024-11-12T19:33:01,505 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#B#compaction#55 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:01,506 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/1cf28ac15f664a58b97ba2f44ea73a3e is 50, key is test_row_0/B:col10/1731439980446/Put/seqid=0 2024-11-12T19:33:01,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741893_1069 (size=12915) 2024-11-12T19:33:01,566 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:01,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-12T19:33:01,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:01,568 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-12T19:33:01,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A 2024-11-12T19:33:01,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:01,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B 2024-11-12T19:33:01,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:01,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C 2024-11-12T19:33:01,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:01,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741894_1070 (size=12915) 2024-11-12T19:33:01,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/18b0cb47328648cabc95b6796b2cbb7c is 50, key is test_row_0/A:col10/1731439980592/Put/seqid=0 2024-11-12T19:33:01,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741895_1071 
(size=12301) 2024-11-12T19:33:01,671 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/1cf28ac15f664a58b97ba2f44ea73a3e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/1cf28ac15f664a58b97ba2f44ea73a3e 2024-11-12T19:33:01,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-12T19:33:01,738 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/B of 76d7848c1ddd620b84cb604cad3a693a into 1cf28ac15f664a58b97ba2f44ea73a3e(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:01,738 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:33:01,738 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/B, priority=13, startTime=1731439981442; duration=0sec 2024-11-12T19:33:01,738 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:01,738 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:B 2024-11-12T19:33:01,739 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:01,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:33:01,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:01,755 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:01,755 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/C is initiating minor compaction (all files) 2024-11-12T19:33:01,755 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/C in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
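The pattern repeating through this section is the master re-dispatching flush procedure pid=25 (RSProcedureDispatcher handing FlushRegionCallable to the region server), the region server declining with "as already flushing", the callable failing with "Unable to complete flush", and the dispatch being retried until the in-flight flush finishes, while pid=24 is polled with "Checking to see if procedure is done". From application code such a flush is normally requested through the Admin API; whether this test goes through that exact call is not visible in the log, so the snippet below is a hedged illustration rather than the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the cluster to flush every region of the table; server-side machinery
      // may retry, as the log above shows, while a region is already mid-flush.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}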
2024-11-12T19:33:01,755 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/4421682134d6468cba981f4173014b95, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/c1cac6fed98b430982c3029e46835e34, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/ddc22f915e3c40188f60c90aeed9443a] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=36.2 K 2024-11-12T19:33:01,761 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 4421682134d6468cba981f4173014b95, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1731439975507 2024-11-12T19:33:01,766 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting c1cac6fed98b430982c3029e46835e34, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1731439977909 2024-11-12T19:33:01,769 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting ddc22f915e3c40188f60c90aeed9443a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1731439980112 2024-11-12T19:33:01,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:01,803 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#C#compaction#57 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:01,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440041797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:01,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:01,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440041798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:01,804 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/c5956f18420446f9b5067eeeead8a50c is 50, key is test_row_0/C:col10/1731439980446/Put/seqid=0 2024-11-12T19:33:01,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:01,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440041805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:01,813 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:01,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440041810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:01,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741896_1072 (size=12915) 2024-11-12T19:33:01,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:01,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440041915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:01,918 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:01,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440041917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:01,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:01,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440041923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:01,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:01,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440041924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:01,983 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/9be109b41cfa4173b1934dc79d904bfb as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/9be109b41cfa4173b1934dc79d904bfb 2024-11-12T19:33:02,001 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/A of 76d7848c1ddd620b84cb604cad3a693a into 9be109b41cfa4173b1934dc79d904bfb(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
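The records above show the region server's own compaction threads at work: the three flushed HFiles selected earlier for region 76d7848c1ddd620b84cb604cad3a693a (roughly 36 K of input across three ~12 K files) are rewritten into a single ~12.6 K file per store, presumably because later flushes carry newer versions of the same test rows and the superseded cells are dropped. These compactions are scheduled by the region server itself, but the same work can also be requested from a client through the public Admin API. A minimal sketch of that, assuming a reachable cluster picked up from the default client configuration; the class name RequestCompaction is purely illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompaction {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Ask the servers to compact every store of the table; the request is queued and
          // executed by the region server's compaction threads, as in the log above.
          admin.compact(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }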
2024-11-12T19:33:02,001 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:33:02,001 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/A, priority=13, startTime=1731439981441; duration=0sec 2024-11-12T19:33:02,001 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:02,001 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:A 2024-11-12T19:33:02,065 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/18b0cb47328648cabc95b6796b2cbb7c 2024-11-12T19:33:02,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/1b0c2124e1264a22be0dbac12016f681 is 50, key is test_row_0/B:col10/1731439980592/Put/seqid=0 2024-11-12T19:33:02,127 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:02,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440042124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:02,127 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:02,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440042125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:02,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:02,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48598 deadline: 1731440042128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:02,138 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4212 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., hostname=81d69e608036,33067,1731439956493, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:33:02,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:02,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440042140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:02,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:02,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440042141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:02,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741897_1073 (size=12301) 2024-11-12T19:33:02,327 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/c5956f18420446f9b5067eeeead8a50c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/c5956f18420446f9b5067eeeead8a50c 2024-11-12T19:33:02,381 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/C of 76d7848c1ddd620b84cb604cad3a693a into c5956f18420446f9b5067eeeead8a50c(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:02,382 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:33:02,382 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/C, priority=13, startTime=1731439981442; duration=0sec 2024-11-12T19:33:02,382 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:02,382 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:C 2024-11-12T19:33:02,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:02,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440042430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:02,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:02,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440042431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:02,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:02,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440042446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:02,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:02,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440042446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:02,563 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/1b0c2124e1264a22be0dbac12016f681 2024-11-12T19:33:02,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/13897cf292e246c599f434e0958aa3e8 is 50, key is test_row_0/C:col10/1731439980592/Put/seqid=0 2024-11-12T19:33:02,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741898_1074 (size=12301) 2024-11-12T19:33:02,631 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/13897cf292e246c599f434e0958aa3e8 2024-11-12T19:33:02,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/18b0cb47328648cabc95b6796b2cbb7c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/18b0cb47328648cabc95b6796b2cbb7c 2024-11-12T19:33:02,658 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/18b0cb47328648cabc95b6796b2cbb7c, entries=150, sequenceid=285, filesize=12.0 K 2024-11-12T19:33:02,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/1b0c2124e1264a22be0dbac12016f681 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/1b0c2124e1264a22be0dbac12016f681 2024-11-12T19:33:02,676 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/1b0c2124e1264a22be0dbac12016f681, entries=150, sequenceid=285, filesize=12.0 K 2024-11-12T19:33:02,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/13897cf292e246c599f434e0958aa3e8 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/13897cf292e246c599f434e0958aa3e8 2024-11-12T19:33:02,695 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/13897cf292e246c599f434e0958aa3e8, entries=150, sequenceid=285, filesize=12.0 K 2024-11-12T19:33:02,698 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 76d7848c1ddd620b84cb604cad3a693a in 1130ms, sequenceid=285, compaction requested=false 2024-11-12T19:33:02,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:33:02,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
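The long run of RegionTooBusyException warnings in this stretch of the log is the server's write-side flow control: once the region's memstore grows past the blocking threshold reported as "Over memstore limit=512.0 K", HRegion.checkResources rejects further mutations until a flush such as the one just completed (~127 KB written in 1130 ms) drains the memstore. On the client side the retrying caller absorbs these rejections and retries with backoff, which is what the earlier "Call exception, tries=6, retries=16" message from RpcRetryingCallerImpl records. A minimal client-side sketch, assuming the standard HBase Java client and illustrative retry/pause values (the real test tool, AcidGuaranteesTestTool, drives writes through its own worker threads):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingWriter {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative values: the retrying caller re-attempts a put rejected with
        // RegionTooBusyException, sleeping between attempts, up to this many tries.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100);             // base retry pause in milliseconds

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // Blocks until the mutation is accepted or retries are exhausted; a region that is
          // temporarily over its memstore limit shows up here only as added latency.
          table.put(put);
        }
      }
    }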
2024-11-12T19:33:02,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-12T19:33:02,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-12T19:33:02,710 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-12T19:33:02,710 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1030 sec 2024-11-12T19:33:02,714 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 2.1160 sec 2024-11-12T19:33:02,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-12T19:33:02,720 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-12T19:33:02,725 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:33:02,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-12T19:33:02,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-12T19:33:02,733 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:33:02,735 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:33:02,735 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:33:02,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-12T19:33:02,888 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:02,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-12T19:33:02,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
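The preceding records show the client-driven side of the test: a test thread asks the master to flush the table (the HBaseAdmin$TableFuture line reports "Operation: FLUSH ... procId: 24 completed"), the master stores a FlushTableProcedure (pid=26) with a FlushRegionProcedure subprocedure per region, and the region server then executes the corresponding FlushRegionCallable (pid=27). A minimal sketch of issuing such a flush from a client, assuming only the standard Admin API (error handling and the test tool's timeout bookkeeping omitted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Flushes every region of the table; the master tracks the request as the
          // FlushTableProcedure/FlushRegionProcedure pair visible in the log above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }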
2024-11-12T19:33:02,891 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-12T19:33:02,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A 2024-11-12T19:33:02,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:02,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B 2024-11-12T19:33:02,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:02,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C 2024-11-12T19:33:02,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:02,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/ac1cb6606bc243b0bc68d831ab44c286 is 50, key is test_row_0/A:col10/1731439981797/Put/seqid=0 2024-11-12T19:33:02,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:02,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:33:02,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741899_1075 (size=12301) 2024-11-12T19:33:02,972 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/ac1cb6606bc243b0bc68d831ab44c286 2024-11-12T19:33:02,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/06196e5bcddc4e5b8202411cc936d133 is 50, key is test_row_0/B:col10/1731439981797/Put/seqid=0 2024-11-12T19:33:03,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440043013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440043013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440043021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440043027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741900_1076 (size=12301) 2024-11-12T19:33:03,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-12T19:33:03,037 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/06196e5bcddc4e5b8202411cc936d133 2024-11-12T19:33:03,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/411f42dd2df94923adc5869bf63f40c3 is 50, key is test_row_0/C:col10/1731439981797/Put/seqid=0 2024-11-12T19:33:03,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741901_1077 (size=12301) 2024-11-12T19:33:03,102 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/411f42dd2df94923adc5869bf63f40c3 2024-11-12T19:33:03,117 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/ac1cb6606bc243b0bc68d831ab44c286 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/ac1cb6606bc243b0bc68d831ab44c286 2024-11-12T19:33:03,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440043123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,134 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/ac1cb6606bc243b0bc68d831ab44c286, entries=150, sequenceid=303, filesize=12.0 K 2024-11-12T19:33:03,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/06196e5bcddc4e5b8202411cc936d133 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/06196e5bcddc4e5b8202411cc936d133 2024-11-12T19:33:03,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440043134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440043138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440043135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,152 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/06196e5bcddc4e5b8202411cc936d133, entries=150, sequenceid=303, filesize=12.0 K 2024-11-12T19:33:03,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/411f42dd2df94923adc5869bf63f40c3 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/411f42dd2df94923adc5869bf63f40c3 2024-11-12T19:33:03,167 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/411f42dd2df94923adc5869bf63f40c3, entries=150, sequenceid=303, filesize=12.0 K 2024-11-12T19:33:03,172 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 76d7848c1ddd620b84cb604cad3a693a in 281ms, sequenceid=303, compaction requested=true 2024-11-12T19:33:03,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:33:03,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
2024-11-12T19:33:03,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-12T19:33:03,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-12T19:33:03,193 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-12T19:33:03,193 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 443 msec 2024-11-12T19:33:03,211 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 470 msec 2024-11-12T19:33:03,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-12T19:33:03,339 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-12T19:33:03,349 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:33:03,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-12T19:33:03,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-12T19:33:03,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-12T19:33:03,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A 2024-11-12T19:33:03,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:03,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B 2024-11-12T19:33:03,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:03,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C 2024-11-12T19:33:03,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:03,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:33:03,356 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:33:03,362 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, 
id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:33:03,362 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:33:03,377 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/4541586bafbc4cfd82f0d19713e1c21b is 50, key is test_row_0/A:col10/1731439983348/Put/seqid=0 2024-11-12T19:33:03,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741902_1078 (size=12301) 2024-11-12T19:33:03,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440043413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440043413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,425 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/4541586bafbc4cfd82f0d19713e1c21b 2024-11-12T19:33:03,426 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440043423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440043428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-12T19:33:03,468 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/79b6f772bc7d455494a6c70ba1fb193b is 50, key is test_row_0/B:col10/1731439983348/Put/seqid=0 2024-11-12T19:33:03,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741903_1079 (size=12301) 2024-11-12T19:33:03,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/79b6f772bc7d455494a6c70ba1fb193b 2024-11-12T19:33:03,531 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:03,535 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:03,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440043526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:03,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:03,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:03,537 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:03,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:03,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440043528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:03,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440043539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440043541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,569 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/91302013e41c4cc4a3773af4b108f7bc is 50, key is test_row_0/C:col10/1731439983348/Put/seqid=0 2024-11-12T19:33:03,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741904_1080 (size=12301) 2024-11-12T19:33:03,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/91302013e41c4cc4a3773af4b108f7bc 2024-11-12T19:33:03,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/4541586bafbc4cfd82f0d19713e1c21b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/4541586bafbc4cfd82f0d19713e1c21b 2024-11-12T19:33:03,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-12T19:33:03,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/4541586bafbc4cfd82f0d19713e1c21b, entries=150, sequenceid=326, filesize=12.0 K 2024-11-12T19:33:03,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/79b6f772bc7d455494a6c70ba1fb193b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/79b6f772bc7d455494a6c70ba1fb193b 2024-11-12T19:33:03,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/79b6f772bc7d455494a6c70ba1fb193b, entries=150, sequenceid=326, filesize=12.0 K 2024-11-12T19:33:03,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/91302013e41c4cc4a3773af4b108f7bc as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/91302013e41c4cc4a3773af4b108f7bc 2024-11-12T19:33:03,699 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:03,700 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:03,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:03,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:03,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:03,704 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:03,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:03,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:03,715 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/91302013e41c4cc4a3773af4b108f7bc, entries=150, sequenceid=326, filesize=12.0 K 2024-11-12T19:33:03,719 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 76d7848c1ddd620b84cb604cad3a693a in 366ms, sequenceid=326, compaction requested=true 2024-11-12T19:33:03,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:33:03,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:33:03,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:03,720 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:33:03,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:33:03,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:03,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:33:03,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-12T19:33:03,721 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:33:03,722 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/A is initiating minor compaction (all files) 2024-11-12T19:33:03,722 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:33:03,722 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/A in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
2024-11-12T19:33:03,722 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/9be109b41cfa4173b1934dc79d904bfb, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/18b0cb47328648cabc95b6796b2cbb7c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/ac1cb6606bc243b0bc68d831ab44c286, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/4541586bafbc4cfd82f0d19713e1c21b] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=48.7 K 2024-11-12T19:33:03,723 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 9be109b41cfa4173b1934dc79d904bfb, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1731439980112 2024-11-12T19:33:03,723 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 18b0cb47328648cabc95b6796b2cbb7c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1731439980577 2024-11-12T19:33:03,723 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting ac1cb6606bc243b0bc68d831ab44c286, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1731439981784 2024-11-12T19:33:03,724 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 4541586bafbc4cfd82f0d19713e1c21b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1731439982998 2024-11-12T19:33:03,729 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:33:03,730 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/B is initiating minor compaction (all files) 2024-11-12T19:33:03,730 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/B in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
2024-11-12T19:33:03,730 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/1cf28ac15f664a58b97ba2f44ea73a3e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/1b0c2124e1264a22be0dbac12016f681, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/06196e5bcddc4e5b8202411cc936d133, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/79b6f772bc7d455494a6c70ba1fb193b] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=48.7 K 2024-11-12T19:33:03,732 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1cf28ac15f664a58b97ba2f44ea73a3e, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1731439980112 2024-11-12T19:33:03,737 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b0c2124e1264a22be0dbac12016f681, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1731439980577 2024-11-12T19:33:03,740 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06196e5bcddc4e5b8202411cc936d133, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1731439981784 2024-11-12T19:33:03,743 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79b6f772bc7d455494a6c70ba1fb193b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1731439982998 2024-11-12T19:33:03,748 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-12T19:33:03,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A 2024-11-12T19:33:03,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:03,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B 2024-11-12T19:33:03,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:03,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C 2024-11-12T19:33:03,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:03,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:33:03,766 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#A#compaction#66 average throughput is 3.28 
MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:03,772 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/7b05295aace6433393aea28988867097 is 50, key is test_row_0/A:col10/1731439983348/Put/seqid=0 2024-11-12T19:33:03,775 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/e7e904adbbd54c2aa407735f31be98c8 is 50, key is test_row_0/A:col10/1731439983378/Put/seqid=0 2024-11-12T19:33:03,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741905_1081 (size=13051) 2024-11-12T19:33:03,812 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/7b05295aace6433393aea28988867097 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/7b05295aace6433393aea28988867097 2024-11-12T19:33:03,826 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#B#compaction#68 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:03,826 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/a18ead5a335d4d60a9b75113a6c628e6 is 50, key is test_row_0/B:col10/1731439983348/Put/seqid=0 2024-11-12T19:33:03,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741906_1082 (size=12301) 2024-11-12T19:33:03,831 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/e7e904adbbd54c2aa407735f31be98c8 2024-11-12T19:33:03,831 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/A of 76d7848c1ddd620b84cb604cad3a693a into 7b05295aace6433393aea28988867097(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:03,831 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:33:03,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440043819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,831 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/A, priority=12, startTime=1731439983720; duration=0sec 2024-11-12T19:33:03,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440043824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,834 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:03,834 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:A 2024-11-12T19:33:03,834 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:33:03,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440043830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440043832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,845 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:33:03,845 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/C is initiating minor compaction (all files) 2024-11-12T19:33:03,845 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/C in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
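The repeated RegionTooBusyException warnings above come from HRegion.checkResources: once the region's memstore grows past its blocking limit (512.0 K here), new Mutate calls are rejected until the in-flight flush frees memory. That limit is normally the per-region flush size multiplied by hbase.hregion.memstore.block.multiplier. The values in the sketch below are assumptions chosen only to reproduce a 512 KB ceiling for illustration; they are not the settings this test run actually uses.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative configuration: a 128 KB flush trigger with a multiplier of 4
    // yields the 512 KB blocking limit reported in the log. Assumed values only.
    public class MemstoreBlockingConfig {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // flush a region at ~128 KB
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x = 512 KB
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("Writes block above roughly " + blockingLimit + " bytes");
        }
    }

Clients normally do not have to handle this exception themselves; the stock HBase client retries the mutation after a pause, which is why the same connections (for example 172.17.0.3:48672) keep reappearing in the log with later deadlines.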
2024-11-12T19:33:03,845 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/c5956f18420446f9b5067eeeead8a50c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/13897cf292e246c599f434e0958aa3e8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/411f42dd2df94923adc5869bf63f40c3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/91302013e41c4cc4a3773af4b108f7bc] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=48.7 K 2024-11-12T19:33:03,846 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting c5956f18420446f9b5067eeeead8a50c, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1731439980112 2024-11-12T19:33:03,850 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 13897cf292e246c599f434e0958aa3e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1731439980577 2024-11-12T19:33:03,851 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 411f42dd2df94923adc5869bf63f40c3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1731439981784 2024-11-12T19:33:03,852 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 91302013e41c4cc4a3773af4b108f7bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1731439982998 2024-11-12T19:33:03,862 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:03,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:03,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:03,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:03,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
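Above, the master has dispatched a FlushRegionCallable (pid=29) to the region server, but the region reports that it is already flushing, so the callable closes without doing any work. The sketch below shows the kind of client-side flush request that such a procedure typically serves in this 2.7.0-SNAPSHOT build; linking it to pid=29 specifically is an assumption, since the log does not show the originating call.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Illustrative sketch: request an explicit flush of the test table.
    public class RequestFlush {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }

When the callable fails, the region server reports the error back through RemoteProcedureResultReporter and the master re-dispatches it later, which is exactly the pattern the following entries show for pid=29.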
2024-11-12T19:33:03,863 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:03,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:03,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:03,881 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/27bc1ec4cdf34ed185622624ade9dd72 is 50, key is test_row_0/B:col10/1731439983378/Put/seqid=0 2024-11-12T19:33:03,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741907_1083 (size=13051) 2024-11-12T19:33:03,890 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#C#compaction#70 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:03,891 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/488df61fb4914b0ab5ab65a3cfef1323 is 50, key is test_row_0/C:col10/1731439983348/Put/seqid=0 2024-11-12T19:33:03,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741908_1084 (size=12301) 2024-11-12T19:33:03,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440043934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741909_1085 (size=13051) 2024-11-12T19:33:03,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440043951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440043951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-12T19:33:03,961 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/488df61fb4914b0ab5ab65a3cfef1323 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/488df61fb4914b0ab5ab65a3cfef1323 2024-11-12T19:33:03,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:03,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440043955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:03,980 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/C of 76d7848c1ddd620b84cb604cad3a693a into 488df61fb4914b0ab5ab65a3cfef1323(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:03,980 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:33:03,980 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/C, priority=12, startTime=1731439983720; duration=0sec 2024-11-12T19:33:03,980 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:03,980 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:C 2024-11-12T19:33:04,003 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-12T19:33:04,031 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:04,031 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:04,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:04,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
as already flushing 2024-11-12T19:33:04,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:04,032 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:04,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:04,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:04,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:04,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440044144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:04,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:04,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440044155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:04,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:04,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440044163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:04,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:04,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440044168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:04,186 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:04,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:04,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:04,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:04,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:04,190 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
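The Mutate calls being rejected above are ordinary Puts. Judging from the cell keys the flusher writes (for example test_row_0/A:col10), each writer updates the same qualifier in families A, B and C of one row, which is the multi-family, single-row pattern the ACID-guarantees load exercises. The snippet below is a minimal sketch of such a writer; only the row key, families and qualifier come from the log, and the payload value is an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative writer: one Put touching all three families of a single row,
    // the kind of mutation the flushes and compactions above persist.
    public class MultiFamilyPut {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                byte[] value = Bytes.toBytes("some-value");        // assumed payload
                Put put = new Put(Bytes.toBytes("test_row_0"));    // row key taken from the log
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
                put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
                put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
                table.put(put); // applied atomically within the row, across families
            }
        }
    }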
2024-11-12T19:33:04,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:04,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:04,312 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/a18ead5a335d4d60a9b75113a6c628e6 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/a18ead5a335d4d60a9b75113a6c628e6 2024-11-12T19:33:04,340 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/B of 76d7848c1ddd620b84cb604cad3a693a into a18ead5a335d4d60a9b75113a6c628e6(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:04,340 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:33:04,340 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/B, priority=12, startTime=1731439983720; duration=0sec 2024-11-12T19:33:04,340 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:04,340 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:B 2024-11-12T19:33:04,343 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/27bc1ec4cdf34ed185622624ade9dd72 2024-11-12T19:33:04,352 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:04,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:04,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:04,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:04,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
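At this point stores A, B and C have each been compacted down to a single file of about 12.7 K, with fresh flush output arriving alongside. A reader should still see the three families of any row agree on the same value, which is the row-level atomicity this test exercises. The sketch below shows a whole-row consistency check of that shape; that this mirrors the test's own readers is an assumption, and the check here is purely illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical reader: scan whole rows and compare the same qualifier across
    // the three families; atomic multi-family Puts should keep them equal.
    public class RowConsistencyScan {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"));
                 ResultScanner scanner = table.getScanner(new Scan())) {
                for (Result row : scanner) {
                    byte[] a = row.getValue(Bytes.toBytes("A"), Bytes.toBytes("col10"));
                    byte[] b = row.getValue(Bytes.toBytes("B"), Bytes.toBytes("col10"));
                    byte[] c = row.getValue(Bytes.toBytes("C"), Bytes.toBytes("col10"));
                    if (!Bytes.equals(a, b) || !Bytes.equals(b, c)) {
                        throw new IllegalStateException("Row " + Bytes.toString(row.getRow())
                            + " is not consistent across families");
                    }
                }
            }
        }
    }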
2024-11-12T19:33:04,355 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:04,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:04,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:04,388 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/63da8a9e02954eaba8bc428518a980f2 is 50, key is test_row_0/C:col10/1731439983378/Put/seqid=0 2024-11-12T19:33:04,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741910_1086 (size=12301) 2024-11-12T19:33:04,456 ERROR [LeaseRenewer:jenkins@localhost:41367 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins@localhost:41367,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:04,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-12T19:33:04,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:04,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440044459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:04,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:04,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440044466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:04,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:04,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440044477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:04,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:04,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440044483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:04,514 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:04,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:04,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:04,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:04,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:04,519 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:04,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:04,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:04,687 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:04,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:04,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:04,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:04,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:04,695 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:04,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:04,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:04,830 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/63da8a9e02954eaba8bc428518a980f2 2024-11-12T19:33:04,865 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/e7e904adbbd54c2aa407735f31be98c8 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/e7e904adbbd54c2aa407735f31be98c8 2024-11-12T19:33:04,871 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:04,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:04,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:04,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:04,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:04,872 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:04,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:04,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:04,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/e7e904adbbd54c2aa407735f31be98c8, entries=150, sequenceid=341, filesize=12.0 K 2024-11-12T19:33:04,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/27bc1ec4cdf34ed185622624ade9dd72 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/27bc1ec4cdf34ed185622624ade9dd72 2024-11-12T19:33:04,931 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/27bc1ec4cdf34ed185622624ade9dd72, entries=150, sequenceid=341, filesize=12.0 K 2024-11-12T19:33:04,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/63da8a9e02954eaba8bc428518a980f2 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/63da8a9e02954eaba8bc428518a980f2 2024-11-12T19:33:04,944 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/63da8a9e02954eaba8bc428518a980f2, entries=150, sequenceid=341, filesize=12.0 K 2024-11-12T19:33:04,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 76d7848c1ddd620b84cb604cad3a693a in 1201ms, sequenceid=341, compaction requested=false 2024-11-12T19:33:04,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:33:04,980 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-12T19:33:04,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A 2024-11-12T19:33:04,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:04,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B 2024-11-12T19:33:04,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:04,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C 2024-11-12T19:33:04,980 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:04,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:33:05,005 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/7e2842f459be4ed1959397a3938ad39f is 50, key is test_row_0/A:col10/1731439983827/Put/seqid=0 2024-11-12T19:33:05,030 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:05,031 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:05,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:05,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:05,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:05,032 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:05,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:05,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:05,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440045025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:05,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:05,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:05,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440045033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:05,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:05,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440045034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:05,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:05,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440045039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:05,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741911_1087 (size=14741) 2024-11-12T19:33:05,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:05,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440045140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:05,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:05,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440045145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:05,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:05,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:05,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440045147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:05,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440045147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:05,210 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:05,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:05,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:05,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:05,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:05,212 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:05,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:05,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:05,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:05,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440045347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:05,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:05,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440045350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:05,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:05,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440045350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:05,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:05,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440045351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:05,367 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:05,372 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:05,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:05,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:05,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:05,374 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:05,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:05,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:05,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-12T19:33:05,459 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/7e2842f459be4ed1959397a3938ad39f 2024-11-12T19:33:05,489 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/7f12c692c64745838420b6d555709a6f is 50, key is test_row_0/B:col10/1731439983827/Put/seqid=0 2024-11-12T19:33:05,531 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:05,537 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:05,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:05,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:05,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:05,538 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:05,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:05,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:05,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741912_1088 (size=12301) 2024-11-12T19:33:05,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:05,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48640 deadline: 1731440045656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:05,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:05,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48658 deadline: 1731440045663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:05,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:05,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48672 deadline: 1731440045663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:05,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:05,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48638 deadline: 1731440045679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:05,701 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:05,701 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:05,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:05,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:05,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:05,702 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:05,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:05,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:05,769 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e6758ed to 127.0.0.1:60358 2024-11-12T19:33:05,769 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22daddc4 to 127.0.0.1:60358 2024-11-12T19:33:05,769 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:33:05,769 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:33:05,771 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f0c7188 to 127.0.0.1:60358 2024-11-12T19:33:05,771 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:33:05,779 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x50c9c1d1 to 127.0.0.1:60358 2024-11-12T19:33:05,779 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:33:05,860 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:05,861 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:05,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:05,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:05,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:05,861 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:05,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:05,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:05,944 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/7f12c692c64745838420b6d555709a6f 2024-11-12T19:33:05,957 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/ca032adc802f4278b5517c60d2bb1cd6 is 50, key is test_row_0/C:col10/1731439983827/Put/seqid=0 2024-11-12T19:33:05,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741913_1089 (size=12301) 2024-11-12T19:33:05,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/ca032adc802f4278b5517c60d2bb1cd6 2024-11-12T19:33:05,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/7e2842f459be4ed1959397a3938ad39f as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/7e2842f459be4ed1959397a3938ad39f 2024-11-12T19:33:05,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/7e2842f459be4ed1959397a3938ad39f, entries=200, sequenceid=366, filesize=14.4 K 2024-11-12T19:33:06,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/7f12c692c64745838420b6d555709a6f as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/7f12c692c64745838420b6d555709a6f 2024-11-12T19:33:06,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/7f12c692c64745838420b6d555709a6f, entries=150, sequenceid=366, filesize=12.0 K 2024-11-12T19:33:06,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/ca032adc802f4278b5517c60d2bb1cd6 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/ca032adc802f4278b5517c60d2bb1cd6 2024-11-12T19:33:06,015 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:06,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:06,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:06,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:06,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:06,016 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:06,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:06,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:06,017 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/ca032adc802f4278b5517c60d2bb1cd6, entries=150, sequenceid=366, filesize=12.0 K 2024-11-12T19:33:06,020 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 76d7848c1ddd620b84cb604cad3a693a in 1040ms, sequenceid=366, compaction requested=true 2024-11-12T19:33:06,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:33:06,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:33:06,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:06,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:33:06,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:06,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 76d7848c1ddd620b84cb604cad3a693a:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:33:06,020 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:06,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-12T19:33:06,022 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40093 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:06,022 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/A is initiating minor compaction (all files) 2024-11-12T19:33:06,022 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/A in 
TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:06,022 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:06,022 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/7b05295aace6433393aea28988867097, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/e7e904adbbd54c2aa407735f31be98c8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/7e2842f459be4ed1959397a3938ad39f] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=39.2 K 2024-11-12T19:33:06,031 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b05295aace6433393aea28988867097, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1731439982998 2024-11-12T19:33:06,039 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7e904adbbd54c2aa407735f31be98c8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1731439983378 2024-11-12T19:33:06,039 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:06,039 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/B is initiating minor compaction (all files) 2024-11-12T19:33:06,039 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/B in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
2024-11-12T19:33:06,039 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/a18ead5a335d4d60a9b75113a6c628e6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/27bc1ec4cdf34ed185622624ade9dd72, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/7f12c692c64745838420b6d555709a6f] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=36.8 K 2024-11-12T19:33:06,043 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e2842f459be4ed1959397a3938ad39f, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1731439983807 2024-11-12T19:33:06,047 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting a18ead5a335d4d60a9b75113a6c628e6, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1731439982998 2024-11-12T19:33:06,051 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 27bc1ec4cdf34ed185622624ade9dd72, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1731439983378 2024-11-12T19:33:06,052 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f12c692c64745838420b6d555709a6f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1731439983807 2024-11-12T19:33:06,081 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#A#compaction#75 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:06,084 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/73d775f62a6b41e7a578cd0f07795729 is 50, key is test_row_0/A:col10/1731439983827/Put/seqid=0 2024-11-12T19:33:06,094 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#B#compaction#76 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:06,096 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/0b1d4fb377484945a1979dadd082cbea is 50, key is test_row_0/B:col10/1731439983827/Put/seqid=0 2024-11-12T19:33:06,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741914_1090 (size=13153) 2024-11-12T19:33:06,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741915_1091 (size=13153) 2024-11-12T19:33:06,169 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:33:06,171 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-12T19:33:06,172 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x305f2915 to 127.0.0.1:60358 2024-11-12T19:33:06,172 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:33:06,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A 2024-11-12T19:33:06,173 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2cac4303 to 127.0.0.1:60358 2024-11-12T19:33:06,173 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:33:06,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:06,174 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:06,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B 2024-11-12T19:33:06,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:06,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C 2024-11-12T19:33:06,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:06,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:06,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
as already flushing 2024-11-12T19:33:06,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:06,174 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:06,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:06,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:06,183 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x30c68ddf to 127.0.0.1:60358 2024-11-12T19:33:06,183 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:33:06,183 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d7115de to 127.0.0.1:60358 2024-11-12T19:33:06,183 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:33:06,194 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2ec99212 to 127.0.0.1:60358 2024-11-12T19:33:06,194 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:33:06,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/b417ffbbff5c4434bc3bf8325d79c35e is 50, key is test_row_0/A:col10/1731439986170/Put/seqid=0 2024-11-12T19:33:06,208 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/73d775f62a6b41e7a578cd0f07795729 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/73d775f62a6b41e7a578cd0f07795729 2024-11-12T19:33:06,210 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/0b1d4fb377484945a1979dadd082cbea as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/0b1d4fb377484945a1979dadd082cbea 2024-11-12T19:33:06,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741916_1092 (size=12301) 2024-11-12T19:33:06,251 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/A of 76d7848c1ddd620b84cb604cad3a693a into 73d775f62a6b41e7a578cd0f07795729(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:06,251 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:33:06,251 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/A, priority=13, startTime=1731439986020; duration=0sec 2024-11-12T19:33:06,251 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:06,251 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:A 2024-11-12T19:33:06,251 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:06,254 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:06,255 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 76d7848c1ddd620b84cb604cad3a693a/C is initiating minor compaction (all files) 2024-11-12T19:33:06,255 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 76d7848c1ddd620b84cb604cad3a693a/C in TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
2024-11-12T19:33:06,255 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/488df61fb4914b0ab5ab65a3cfef1323, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/63da8a9e02954eaba8bc428518a980f2, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/ca032adc802f4278b5517c60d2bb1cd6] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp, totalSize=36.8 K 2024-11-12T19:33:06,257 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/B of 76d7848c1ddd620b84cb604cad3a693a into 0b1d4fb377484945a1979dadd082cbea(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:06,258 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:33:06,258 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/B, priority=13, startTime=1731439986020; duration=0sec 2024-11-12T19:33:06,258 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:06,258 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:B 2024-11-12T19:33:06,259 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 488df61fb4914b0ab5ab65a3cfef1323, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1731439982998 2024-11-12T19:33:06,260 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63da8a9e02954eaba8bc428518a980f2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1731439983378 2024-11-12T19:33:06,263 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca032adc802f4278b5517c60d2bb1cd6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1731439983807 2024-11-12T19:33:06,321 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 76d7848c1ddd620b84cb604cad3a693a#C#compaction#78 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:06,322 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/d76418edeca64517aa09056d654318f6 is 50, key is test_row_0/C:col10/1731439983827/Put/seqid=0 2024-11-12T19:33:06,330 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:06,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:06,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:06,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:06,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:06,332 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:06,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:06,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:06,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741917_1093 (size=13153) 2024-11-12T19:33:06,486 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:06,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:06,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:06,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:06,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:06,487 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:06,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:06,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:06,644 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:06,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:06,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
2024-11-12T19:33:06,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:06,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:06,645 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:06,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:06,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:06,651 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/b417ffbbff5c4434bc3bf8325d79c35e 2024-11-12T19:33:06,664 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/283d0b10f3194c5b98dca015fcfc1f8a is 50, key is test_row_0/B:col10/1731439986170/Put/seqid=0 2024-11-12T19:33:06,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741918_1094 (size=12301) 2024-11-12T19:33:06,793 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/d76418edeca64517aa09056d654318f6 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/d76418edeca64517aa09056d654318f6 2024-11-12T19:33:06,797 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:06,798 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:06,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:06,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:06,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:06,799 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:06,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:06,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:06,802 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 76d7848c1ddd620b84cb604cad3a693a/C of 76d7848c1ddd620b84cb604cad3a693a into d76418edeca64517aa09056d654318f6(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:06,802 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:33:06,802 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a., storeName=76d7848c1ddd620b84cb604cad3a693a/C, priority=13, startTime=1731439986020; duration=0sec 2024-11-12T19:33:06,803 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:06,803 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 76d7848c1ddd620b84cb604cad3a693a:C 2024-11-12T19:33:06,951 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:06,952 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:06,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:06,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:06,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
2024-11-12T19:33:06,952 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:06,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:06,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:07,090 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/283d0b10f3194c5b98dca015fcfc1f8a 2024-11-12T19:33:07,100 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/81ea2587ee62437792ef226d2bd0212a is 50, key is test_row_0/C:col10/1731439986170/Put/seqid=0 2024-11-12T19:33:07,104 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:07,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:07,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:07,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. as already flushing 2024-11-12T19:33:07,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
2024-11-12T19:33:07,105 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:07,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:07,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:07,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741919_1095 (size=12301) 2024-11-12T19:33:07,120 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/81ea2587ee62437792ef226d2bd0212a 2024-11-12T19:33:07,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/b417ffbbff5c4434bc3bf8325d79c35e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/b417ffbbff5c4434bc3bf8325d79c35e 2024-11-12T19:33:07,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/b417ffbbff5c4434bc3bf8325d79c35e, entries=150, sequenceid=383, filesize=12.0 K 2024-11-12T19:33:07,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/283d0b10f3194c5b98dca015fcfc1f8a as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/283d0b10f3194c5b98dca015fcfc1f8a 2024-11-12T19:33:07,150 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/283d0b10f3194c5b98dca015fcfc1f8a, entries=150, sequenceid=383, filesize=12.0 K 2024-11-12T19:33:07,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/81ea2587ee62437792ef226d2bd0212a as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/81ea2587ee62437792ef226d2bd0212a 2024-11-12T19:33:07,170 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/81ea2587ee62437792ef226d2bd0212a, entries=150, sequenceid=383, filesize=12.0 K 2024-11-12T19:33:07,171 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=20.13 KB/20610 for 76d7848c1ddd620b84cb604cad3a693a in 1000ms, sequenceid=383, compaction requested=false 2024-11-12T19:33:07,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:33:07,258 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:07,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-12T19:33:07,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
2024-11-12T19:33:07,259 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 76d7848c1ddd620b84cb604cad3a693a 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-12T19:33:07,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=A 2024-11-12T19:33:07,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:07,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=B 2024-11-12T19:33:07,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:07,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 76d7848c1ddd620b84cb604cad3a693a, store=C 2024-11-12T19:33:07,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:07,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/ebd8b1887f2b481da3e771384e1f111d is 50, key is test_row_0/A:col10/1731439986188/Put/seqid=0 2024-11-12T19:33:07,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741920_1096 (size=9857) 2024-11-12T19:33:07,275 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/ebd8b1887f2b481da3e771384e1f111d 2024-11-12T19:33:07,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/977218e45b6c429cb2f843ccc499bd35 is 50, key is test_row_0/B:col10/1731439986188/Put/seqid=0 2024-11-12T19:33:07,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741921_1097 (size=9857) 2024-11-12T19:33:07,300 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=392 (bloomFilter=true), 
to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/977218e45b6c429cb2f843ccc499bd35 2024-11-12T19:33:07,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/1529c3dcb5114aa3a85d0f90abd742e4 is 50, key is test_row_0/C:col10/1731439986188/Put/seqid=0 2024-11-12T19:33:07,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741922_1098 (size=9857) 2024-11-12T19:33:07,368 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/1529c3dcb5114aa3a85d0f90abd742e4 2024-11-12T19:33:07,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/A/ebd8b1887f2b481da3e771384e1f111d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/ebd8b1887f2b481da3e771384e1f111d 2024-11-12T19:33:07,397 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/ebd8b1887f2b481da3e771384e1f111d, entries=100, sequenceid=392, filesize=9.6 K 2024-11-12T19:33:07,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/B/977218e45b6c429cb2f843ccc499bd35 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/977218e45b6c429cb2f843ccc499bd35 2024-11-12T19:33:07,427 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/977218e45b6c429cb2f843ccc499bd35, entries=100, sequenceid=392, filesize=9.6 K 2024-11-12T19:33:07,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/.tmp/C/1529c3dcb5114aa3a85d0f90abd742e4 as 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/1529c3dcb5114aa3a85d0f90abd742e4 2024-11-12T19:33:07,436 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/1529c3dcb5114aa3a85d0f90abd742e4, entries=100, sequenceid=392, filesize=9.6 K 2024-11-12T19:33:07,437 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 76d7848c1ddd620b84cb604cad3a693a in 177ms, sequenceid=392, compaction requested=true 2024-11-12T19:33:07,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:33:07,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 2024-11-12T19:33:07,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-12T19:33:07,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-12T19:33:07,460 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-12T19:33:07,460 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.0840 sec 2024-11-12T19:33:07,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-12T19:33:07,463 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 4.1120 sec 2024-11-12T19:33:10,485 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T19:33:10,488 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60140, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T19:33:11,256 ERROR [LeaseRenewer:jenkins.hfs.0@localhost:41367 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins.hfs.0@localhost:41367,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] 
	at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:33:11,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28
2024-11-12T19:33:11,462 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed
2024-11-12T19:33:11,463 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-11-12T19:33:11,463 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 73
2024-11-12T19:33:11,463 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63
2024-11-12T19:33:11,463 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 40
2024-11-12T19:33:11,463 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70
2024-11-12T19:33:11,463 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61
2024-11-12T19:33:11,463 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-12T19:33:11,463 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2355
2024-11-12T19:33:11,463 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2298
2024-11-12T19:33:11,463 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-12T19:33:11,463 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1017
2024-11-12T19:33:11,463 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3051 rows
2024-11-12T19:33:11,463 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1043
2024-11-12T19:33:11,463 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3127 rows
2024-11-12T19:33:11,463 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-12T19:33:11,463 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f2052a7 to 127.0.0.1:60358
2024-11-12T19:33:11,463 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-12T19:33:11,470 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-12T19:33:11,476 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees
2024-11-12T19:33:11,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-12T19:33:11,490 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731439991490"}]},"ts":"1731439991490"}
2024-11-12T19:33:11,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30
2024-11-12T19:33:11,492 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-12T19:33:11,586 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-12T19:33:11,589 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-12T19:33:11,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30
2024-11-12T19:33:11,595 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=76d7848c1ddd620b84cb604cad3a693a, UNASSIGN}]
2024-11-12T19:33:11,597 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=76d7848c1ddd620b84cb604cad3a693a, UNASSIGN
2024-11-12T19:33:11,598 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=76d7848c1ddd620b84cb604cad3a693a, regionState=CLOSING, regionLocation=81d69e608036,33067,1731439956493
2024-11-12T19:33:11,599 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-11-12T19:33:11,600 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; CloseRegionProcedure 76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493}]
2024-11-12T19:33:11,757 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:33:11,759 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(124): Close 76d7848c1ddd620b84cb604cad3a693a
2024-11-12T19:33:11,759 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-11-12T19:33:11,760 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1681): Closing 76d7848c1ddd620b84cb604cad3a693a, disabling compactions & flushes
2024-11-12T19:33:11,760 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.
2024-11-12T19:33:11,760 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.
2024-11-12T19:33:11,760 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. after waiting 0 ms
2024-11-12T19:33:11,760 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.
2024-11-12T19:33:11,763 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/abbf0f7982344ed19cf378f41ab9cd9e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/48106f0056ba4091856ffac5e0f51507, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/07fc215acdf64657a1ab5ccb84ca050b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/35c96e4f9f124a28a942c6be5680a9be, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/5e571d64ae784de09638afbf0aab5fbc, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/15c2737e1dec4f8b8968c77df10e3460, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/c19a62eb5e784775b1a5e17670aab142, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/b6337f5a5d264843b04ead124a9e055c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/89486f468dac43c99f000fc82b8413a6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/fa0fbe223b8c4e7d82e8b580f2dc30c9, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/f455d468bac24c55b013be600e0f3311, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/7a36bfa6161548de99fb4e92dd5c7f88, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/5f664198be454cf0beac10e73e45b436, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/a32e65acab3c4074a13e2f9c26f896ac, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/93730cc0ad84445ebb2971ad87eaeb74, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/e73405f3289d43bb9230928cccbf2287, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/182e0a1b020a4a1e9dbfa79a22c154d0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/9be109b41cfa4173b1934dc79d904bfb, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/1ff506b0d7d8423d8b381f962071495d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/18b0cb47328648cabc95b6796b2cbb7c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/ac1cb6606bc243b0bc68d831ab44c286, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/7b05295aace6433393aea28988867097, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/4541586bafbc4cfd82f0d19713e1c21b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/e7e904adbbd54c2aa407735f31be98c8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/7e2842f459be4ed1959397a3938ad39f] to archive 2024-11-12T19:33:11,767 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-12T19:33:11,781 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/abbf0f7982344ed19cf378f41ab9cd9e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/abbf0f7982344ed19cf378f41ab9cd9e 2024-11-12T19:33:11,787 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/48106f0056ba4091856ffac5e0f51507 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/48106f0056ba4091856ffac5e0f51507 2024-11-12T19:33:11,789 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/07fc215acdf64657a1ab5ccb84ca050b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/07fc215acdf64657a1ab5ccb84ca050b 2024-11-12T19:33:11,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-12T19:33:11,795 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/35c96e4f9f124a28a942c6be5680a9be to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/35c96e4f9f124a28a942c6be5680a9be 2024-11-12T19:33:11,801 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/5e571d64ae784de09638afbf0aab5fbc to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/5e571d64ae784de09638afbf0aab5fbc 2024-11-12T19:33:11,805 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/15c2737e1dec4f8b8968c77df10e3460 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/15c2737e1dec4f8b8968c77df10e3460 2024-11-12T19:33:11,808 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/c19a62eb5e784775b1a5e17670aab142 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/c19a62eb5e784775b1a5e17670aab142 2024-11-12T19:33:11,814 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/b6337f5a5d264843b04ead124a9e055c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/b6337f5a5d264843b04ead124a9e055c 2024-11-12T19:33:11,820 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/89486f468dac43c99f000fc82b8413a6 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/89486f468dac43c99f000fc82b8413a6 2024-11-12T19:33:11,825 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/fa0fbe223b8c4e7d82e8b580f2dc30c9 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/fa0fbe223b8c4e7d82e8b580f2dc30c9 2024-11-12T19:33:11,829 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/f455d468bac24c55b013be600e0f3311 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/f455d468bac24c55b013be600e0f3311 2024-11-12T19:33:11,834 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/7a36bfa6161548de99fb4e92dd5c7f88 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/7a36bfa6161548de99fb4e92dd5c7f88 2024-11-12T19:33:11,838 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/5f664198be454cf0beac10e73e45b436 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/5f664198be454cf0beac10e73e45b436 2024-11-12T19:33:11,846 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/a32e65acab3c4074a13e2f9c26f896ac to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/a32e65acab3c4074a13e2f9c26f896ac 2024-11-12T19:33:11,861 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/93730cc0ad84445ebb2971ad87eaeb74 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/93730cc0ad84445ebb2971ad87eaeb74 2024-11-12T19:33:11,869 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/e73405f3289d43bb9230928cccbf2287 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/e73405f3289d43bb9230928cccbf2287 2024-11-12T19:33:11,875 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/182e0a1b020a4a1e9dbfa79a22c154d0 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/182e0a1b020a4a1e9dbfa79a22c154d0 2024-11-12T19:33:11,882 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/9be109b41cfa4173b1934dc79d904bfb to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/9be109b41cfa4173b1934dc79d904bfb 2024-11-12T19:33:11,885 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/1ff506b0d7d8423d8b381f962071495d to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/1ff506b0d7d8423d8b381f962071495d 2024-11-12T19:33:11,890 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/18b0cb47328648cabc95b6796b2cbb7c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/18b0cb47328648cabc95b6796b2cbb7c 2024-11-12T19:33:11,892 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/ac1cb6606bc243b0bc68d831ab44c286 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/ac1cb6606bc243b0bc68d831ab44c286 2024-11-12T19:33:11,894 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/7b05295aace6433393aea28988867097 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/7b05295aace6433393aea28988867097 2024-11-12T19:33:11,897 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/4541586bafbc4cfd82f0d19713e1c21b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/4541586bafbc4cfd82f0d19713e1c21b 2024-11-12T19:33:11,899 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/e7e904adbbd54c2aa407735f31be98c8 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/e7e904adbbd54c2aa407735f31be98c8 2024-11-12T19:33:11,902 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/7e2842f459be4ed1959397a3938ad39f to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/7e2842f459be4ed1959397a3938ad39f 2024-11-12T19:33:11,938 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/d8edb657ee6e4630b9a05892602947f3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/5842002a142d4ca984231d1b5bdb4e6a, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/25929b3e769148cea2d5430b1888263e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/bd9ef219c7884e2886f0c43acc4e5b32, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/631b9f84ab7d44349993393747dab717, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/7fb5610d3c10482d818eb5a728a315ae, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/bc42a5286c164fa0961edddb987694b7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/3a33e9f7ebdb402bb894fb8d8f9e9ccd, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/0d1e3835791b4dd9ab4349f5563f7e5e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/f1c8c8c1b11d4282b99b59d3afc50ec1, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/46b42fa553ed4610a728ba53bf2900ba, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/6239bbc0e75c4ee1806ebd169162cf75, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/b9ec00f53c2b4b898bb34ba3e0058083, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/69a2881f5f9445e78e4ef3433f84eac7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/8de19df865084f74b25cb1a5058adf11, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/9ba3220e3d7f4a57a8f3724cc1f74e1d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/bccef6e4f4534c1c9da83737cfcc56ab, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/1cf28ac15f664a58b97ba2f44ea73a3e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/34ed7949673b4143a0c5bc72c2add754, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/1b0c2124e1264a22be0dbac12016f681, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/06196e5bcddc4e5b8202411cc936d133, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/a18ead5a335d4d60a9b75113a6c628e6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/79b6f772bc7d455494a6c70ba1fb193b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/27bc1ec4cdf34ed185622624ade9dd72, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/7f12c692c64745838420b6d555709a6f] to archive 2024-11-12T19:33:11,940 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-12T19:33:11,950 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/d8edb657ee6e4630b9a05892602947f3 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/d8edb657ee6e4630b9a05892602947f3 2024-11-12T19:33:11,959 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/5842002a142d4ca984231d1b5bdb4e6a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/5842002a142d4ca984231d1b5bdb4e6a 2024-11-12T19:33:11,963 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/25929b3e769148cea2d5430b1888263e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/25929b3e769148cea2d5430b1888263e 2024-11-12T19:33:11,968 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/bd9ef219c7884e2886f0c43acc4e5b32 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/bd9ef219c7884e2886f0c43acc4e5b32 2024-11-12T19:33:11,971 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/631b9f84ab7d44349993393747dab717 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/631b9f84ab7d44349993393747dab717 2024-11-12T19:33:11,974 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/7fb5610d3c10482d818eb5a728a315ae to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/7fb5610d3c10482d818eb5a728a315ae 2024-11-12T19:33:11,979 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/bc42a5286c164fa0961edddb987694b7 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/bc42a5286c164fa0961edddb987694b7 2024-11-12T19:33:11,981 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/3a33e9f7ebdb402bb894fb8d8f9e9ccd to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/3a33e9f7ebdb402bb894fb8d8f9e9ccd 2024-11-12T19:33:11,983 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/0d1e3835791b4dd9ab4349f5563f7e5e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/0d1e3835791b4dd9ab4349f5563f7e5e 2024-11-12T19:33:11,986 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/f1c8c8c1b11d4282b99b59d3afc50ec1 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/f1c8c8c1b11d4282b99b59d3afc50ec1 2024-11-12T19:33:11,988 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/46b42fa553ed4610a728ba53bf2900ba to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/46b42fa553ed4610a728ba53bf2900ba 2024-11-12T19:33:11,992 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/6239bbc0e75c4ee1806ebd169162cf75 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/6239bbc0e75c4ee1806ebd169162cf75 2024-11-12T19:33:11,995 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/b9ec00f53c2b4b898bb34ba3e0058083 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/b9ec00f53c2b4b898bb34ba3e0058083 2024-11-12T19:33:11,997 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/69a2881f5f9445e78e4ef3433f84eac7 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/69a2881f5f9445e78e4ef3433f84eac7 2024-11-12T19:33:12,003 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/8de19df865084f74b25cb1a5058adf11 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/8de19df865084f74b25cb1a5058adf11 2024-11-12T19:33:12,012 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/9ba3220e3d7f4a57a8f3724cc1f74e1d to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/9ba3220e3d7f4a57a8f3724cc1f74e1d 2024-11-12T19:33:12,017 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/bccef6e4f4534c1c9da83737cfcc56ab to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/bccef6e4f4534c1c9da83737cfcc56ab 2024-11-12T19:33:12,023 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/1cf28ac15f664a58b97ba2f44ea73a3e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/1cf28ac15f664a58b97ba2f44ea73a3e 2024-11-12T19:33:12,026 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/34ed7949673b4143a0c5bc72c2add754 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/34ed7949673b4143a0c5bc72c2add754 2024-11-12T19:33:12,032 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/1b0c2124e1264a22be0dbac12016f681 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/1b0c2124e1264a22be0dbac12016f681 2024-11-12T19:33:12,034 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/06196e5bcddc4e5b8202411cc936d133 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/06196e5bcddc4e5b8202411cc936d133 2024-11-12T19:33:12,036 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/a18ead5a335d4d60a9b75113a6c628e6 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/a18ead5a335d4d60a9b75113a6c628e6 2024-11-12T19:33:12,038 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/79b6f772bc7d455494a6c70ba1fb193b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/79b6f772bc7d455494a6c70ba1fb193b 2024-11-12T19:33:12,041 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/27bc1ec4cdf34ed185622624ade9dd72 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/27bc1ec4cdf34ed185622624ade9dd72 2024-11-12T19:33:12,046 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/7f12c692c64745838420b6d555709a6f to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/7f12c692c64745838420b6d555709a6f 2024-11-12T19:33:12,050 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/cb60be88054a4dfa986eb3d02d810764, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/9df159fbed6e431c9029d6ed8ecb9347, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/60f74d7ecd224644a1587adc44c0520d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/fb99e15af6d442bfb168a22becddadb8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/4c8946d6fdf142cfbd77e197992fb179, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/6f8954daf67e4df494af9f5eefe7ec43, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/95737f6da8df4c0db162e3ea7b005bca, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/b1ceace1e43f4fdbafca2fe8e2c2abb3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/25a9769b81894f22b1791a6742ca2736, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/50f57c4a963640be9972623c1d49aa21, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/24287f2f376848dbb8bd7870d2754f91, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/14ccf3a4965f471e8d438be24b0bd615, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/174d8a16a436415fb1a6b2251a1caf67, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/04e95edc86d9448a8f214c79f9d606de, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/4421682134d6468cba981f4173014b95, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/7b9021c4a51f40839a73522c9c358da4, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/c1cac6fed98b430982c3029e46835e34, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/c5956f18420446f9b5067eeeead8a50c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/ddc22f915e3c40188f60c90aeed9443a, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/13897cf292e246c599f434e0958aa3e8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/411f42dd2df94923adc5869bf63f40c3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/488df61fb4914b0ab5ab65a3cfef1323, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/91302013e41c4cc4a3773af4b108f7bc, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/63da8a9e02954eaba8bc428518a980f2, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/ca032adc802f4278b5517c60d2bb1cd6] to archive 2024-11-12T19:33:12,051 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-12T19:33:12,053 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/cb60be88054a4dfa986eb3d02d810764 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/cb60be88054a4dfa986eb3d02d810764 2024-11-12T19:33:12,055 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/9df159fbed6e431c9029d6ed8ecb9347 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/9df159fbed6e431c9029d6ed8ecb9347 2024-11-12T19:33:12,058 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/60f74d7ecd224644a1587adc44c0520d to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/60f74d7ecd224644a1587adc44c0520d 2024-11-12T19:33:12,060 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/fb99e15af6d442bfb168a22becddadb8 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/fb99e15af6d442bfb168a22becddadb8 2024-11-12T19:33:12,064 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/4c8946d6fdf142cfbd77e197992fb179 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/4c8946d6fdf142cfbd77e197992fb179 2024-11-12T19:33:12,067 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/6f8954daf67e4df494af9f5eefe7ec43 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/6f8954daf67e4df494af9f5eefe7ec43 2024-11-12T19:33:12,071 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/95737f6da8df4c0db162e3ea7b005bca to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/95737f6da8df4c0db162e3ea7b005bca 2024-11-12T19:33:12,074 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/b1ceace1e43f4fdbafca2fe8e2c2abb3 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/b1ceace1e43f4fdbafca2fe8e2c2abb3 2024-11-12T19:33:12,076 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/25a9769b81894f22b1791a6742ca2736 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/25a9769b81894f22b1791a6742ca2736 2024-11-12T19:33:12,080 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/50f57c4a963640be9972623c1d49aa21 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/50f57c4a963640be9972623c1d49aa21 2024-11-12T19:33:12,082 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/24287f2f376848dbb8bd7870d2754f91 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/24287f2f376848dbb8bd7870d2754f91 2024-11-12T19:33:12,085 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/14ccf3a4965f471e8d438be24b0bd615 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/14ccf3a4965f471e8d438be24b0bd615 2024-11-12T19:33:12,089 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/174d8a16a436415fb1a6b2251a1caf67 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/174d8a16a436415fb1a6b2251a1caf67 2024-11-12T19:33:12,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-12T19:33:12,098 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/04e95edc86d9448a8f214c79f9d606de to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/04e95edc86d9448a8f214c79f9d606de 2024-11-12T19:33:12,110 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/4421682134d6468cba981f4173014b95 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/4421682134d6468cba981f4173014b95 2024-11-12T19:33:12,112 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/7b9021c4a51f40839a73522c9c358da4 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/7b9021c4a51f40839a73522c9c358da4 2024-11-12T19:33:12,116 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/c1cac6fed98b430982c3029e46835e34 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/c1cac6fed98b430982c3029e46835e34 2024-11-12T19:33:12,121 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/c5956f18420446f9b5067eeeead8a50c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/c5956f18420446f9b5067eeeead8a50c 2024-11-12T19:33:12,125 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/ddc22f915e3c40188f60c90aeed9443a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/ddc22f915e3c40188f60c90aeed9443a 2024-11-12T19:33:12,142 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/13897cf292e246c599f434e0958aa3e8 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/13897cf292e246c599f434e0958aa3e8 2024-11-12T19:33:12,153 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/411f42dd2df94923adc5869bf63f40c3 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/411f42dd2df94923adc5869bf63f40c3 2024-11-12T19:33:12,158 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/488df61fb4914b0ab5ab65a3cfef1323 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/488df61fb4914b0ab5ab65a3cfef1323 2024-11-12T19:33:12,165 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/91302013e41c4cc4a3773af4b108f7bc to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/91302013e41c4cc4a3773af4b108f7bc 2024-11-12T19:33:12,168 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/63da8a9e02954eaba8bc428518a980f2 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/63da8a9e02954eaba8bc428518a980f2 2024-11-12T19:33:12,171 DEBUG [StoreCloser-TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/ca032adc802f4278b5517c60d2bb1cd6 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/ca032adc802f4278b5517c60d2bb1cd6 2024-11-12T19:33:12,199 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/recovered.edits/395.seqid, newMaxSeqId=395, maxSeqId=1 2024-11-12T19:33:12,209 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a. 
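[editor's note] The StoreCloser entries above show each store file being moved from the region's data directory to the matching location under archive/ before the region is closed. A minimal sketch of that path convention follows; it only illustrates the data/ -> archive/data/ mirroring visible in the log and is not the HFileArchiver implementation itself (the helper name and the use of Hadoop's Path class are assumptions for illustration).

// Illustrative sketch only (not the HBase backup.HFileArchiver code): the logged archive
// destinations mirror the source path with "archive/" inserted after the cluster root,
// i.e. <root>/data/<ns>/<table>/<region>/<cf>/<file> -> <root>/archive/data/<ns>/<table>/<region>/<cf>/<file>.
import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
    // Hypothetical helper; assumes storeFile lives under <rootDir>/data/...
    static Path toArchivePath(Path rootDir, Path storeFile) {
        String root = rootDir.toUri().getPath();
        String full = storeFile.toUri().getPath();
        String relative = full.substring(root.length()); // "/data/<ns>/<table>/<region>/<cf>/<file>"
        return new Path(rootDir, "archive" + relative);
    }

    public static void main(String[] args) {
        Path root = new Path("hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8");
        Path hfile = new Path(root,
            "data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/bc42a5286c164fa0961edddb987694b7");
        // Prints .../archive/data/default/TestAcidGuarantees/.../B/bc42a5286c164fa0961edddb987694b7,
        // matching the destination paths logged by backup.HFileArchiver above.
        System.out.println(toArchivePath(root, hfile));
    }
}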
2024-11-12T19:33:12,209 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1635): Region close journal for 76d7848c1ddd620b84cb604cad3a693a: 2024-11-12T19:33:12,220 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(170): Closed 76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:33:12,223 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=76d7848c1ddd620b84cb604cad3a693a, regionState=CLOSED 2024-11-12T19:33:12,240 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-12T19:33:12,240 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseRegionProcedure 76d7848c1ddd620b84cb604cad3a693a, server=81d69e608036,33067,1731439956493 in 630 msec 2024-11-12T19:33:12,250 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=31 2024-11-12T19:33:12,250 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=31, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=76d7848c1ddd620b84cb604cad3a693a, UNASSIGN in 645 msec 2024-11-12T19:33:12,267 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-12T19:33:12,268 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 667 msec 2024-11-12T19:33:12,272 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731439992272"}]},"ts":"1731439992272"} 2024-11-12T19:33:12,274 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-12T19:33:12,286 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-12T19:33:12,300 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 810 msec 2024-11-12T19:33:12,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-12T19:33:12,598 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-12T19:33:12,602 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-11-12T19:33:12,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:33:12,610 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:33:12,612 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=34, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:33:12,612 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-12T19:33:12,619 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:33:12,642 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A, FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B, FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C, FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/recovered.edits] 2024-11-12T19:33:12,669 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/73d775f62a6b41e7a578cd0f07795729 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/73d775f62a6b41e7a578cd0f07795729 2024-11-12T19:33:12,675 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/b417ffbbff5c4434bc3bf8325d79c35e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/b417ffbbff5c4434bc3bf8325d79c35e 2024-11-12T19:33:12,687 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/ebd8b1887f2b481da3e771384e1f111d to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/A/ebd8b1887f2b481da3e771384e1f111d 2024-11-12T19:33:12,708 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/0b1d4fb377484945a1979dadd082cbea to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/0b1d4fb377484945a1979dadd082cbea 2024-11-12T19:33:12,715 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/283d0b10f3194c5b98dca015fcfc1f8a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/283d0b10f3194c5b98dca015fcfc1f8a 
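[editor's note] The DISABLE (pid=30) and DELETE (pid=34) procedures being polled in these entries are driven from the test client through the Admin API; the repeated "Checking to see if procedure is done" lines are the master answering those polls. A minimal client-side sketch, assuming a standard HBase 2.x Connection/Admin setup (configuration and error handling omitted):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Both calls block until the corresponding master procedure
            // (DisableTableProcedure / DeleteTableProcedure) reports completion,
            // which is the polling visible in the log above.
            if (admin.tableExists(table)) {
                admin.disableTable(table);
                admin.deleteTable(table);
            }
        }
    }
}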
2024-11-12T19:33:12,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-12T19:33:12,726 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/977218e45b6c429cb2f843ccc499bd35 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/B/977218e45b6c429cb2f843ccc499bd35 2024-11-12T19:33:12,756 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/1529c3dcb5114aa3a85d0f90abd742e4 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/1529c3dcb5114aa3a85d0f90abd742e4 2024-11-12T19:33:12,769 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/81ea2587ee62437792ef226d2bd0212a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/81ea2587ee62437792ef226d2bd0212a 2024-11-12T19:33:12,776 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/d76418edeca64517aa09056d654318f6 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/C/d76418edeca64517aa09056d654318f6 2024-11-12T19:33:12,788 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/recovered.edits/395.seqid to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a/recovered.edits/395.seqid 2024-11-12T19:33:12,790 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/76d7848c1ddd620b84cb604cad3a693a 2024-11-12T19:33:12,791 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-12T19:33:12,800 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=34, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:33:12,815 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-12T19:33:12,820 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-12T19:33:12,880 DEBUG [PEWorker-5 {}] 
procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-12T19:33:12,891 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=34, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:33:12,891 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-12T19:33:12,892 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731439992891"}]},"ts":"9223372036854775807"} 2024-11-12T19:33:12,903 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-12T19:33:12,904 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 76d7848c1ddd620b84cb604cad3a693a, NAME => 'TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a.', STARTKEY => '', ENDKEY => ''}] 2024-11-12T19:33:12,904 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-12T19:33:12,904 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731439992904"}]},"ts":"9223372036854775807"} 2024-11-12T19:33:12,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-12T19:33:12,928 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-12T19:33:12,947 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=34, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:33:12,951 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 344 msec 2024-11-12T19:33:13,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-12T19:33:13,230 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-12T19:33:13,246 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=235 (was 218) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xf069af2-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xf069af2-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/81d69e608036:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/81d69e608036:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xf069af2-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;81d69e608036:33067-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xf069af2-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 446) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1607 (was 1283) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2247 (was 3502) 2024-11-12T19:33:13,256 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=235, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=1607, ProcessCount=11, AvailableMemoryMB=2247 2024-11-12T19:33:13,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-12T19:33:13,259 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T19:33:13,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-12T19:33:13,261 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T19:33:13,262 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:13,262 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 35 2024-11-12T19:33:13,263 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T19:33:13,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-12T19:33:13,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741923_1099 (size=960) 
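
Editor's note: the entries above show the master receiving a create-table request for 'TestAcidGuarantees' with three column families (A, B, C), one version each, and the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC', plus a TableDescriptorChecker warning about the deliberately small 131072-byte flush size. The following is a minimal sketch, using the standard HBase 2.x client API, of how an equivalent descriptor could be built and submitted. It is an illustration only, not the test's own code; whether the flush size is set on the descriptor or via configuration in the real test is an assumption here.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // Table attribute seen in the log: BASIC in-memory compaction.
          .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
          // The small 128 KB flush size the TableDescriptorChecker warns about
          // (assumed here to be set on the descriptor for illustration).
          .setMemStoreFlushSize(131072);
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)      // VERSIONS => '1'
            .setBlocksize(65536)    // BLOCKSIZE => '65536'
            .build());
      }
      // Drives a CreateTableProcedure on the master, recorded above as pid=35.
      admin.createTable(table.build());
    }
  }
}
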
2024-11-12T19:33:13,301 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8 2024-11-12T19:33:13,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741924_1100 (size=53) 2024-11-12T19:33:13,316 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:33:13,316 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 5e91f676554be63c9f656bc420de8a2a, disabling compactions & flushes 2024-11-12T19:33:13,316 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:13,317 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:13,317 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. after waiting 0 ms 2024-11-12T19:33:13,317 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:13,317 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
2024-11-12T19:33:13,317 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:13,318 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T19:33:13,319 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1731439993318"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731439993318"}]},"ts":"1731439993318"} 2024-11-12T19:33:13,324 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-12T19:33:13,326 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T19:33:13,326 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731439993326"}]},"ts":"1731439993326"} 2024-11-12T19:33:13,328 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-12T19:33:13,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-12T19:33:13,378 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e91f676554be63c9f656bc420de8a2a, ASSIGN}] 2024-11-12T19:33:13,380 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e91f676554be63c9f656bc420de8a2a, ASSIGN 2024-11-12T19:33:13,381 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e91f676554be63c9f656bc420de8a2a, ASSIGN; state=OFFLINE, location=81d69e608036,33067,1731439956493; forceNewPlan=false, retain=false 2024-11-12T19:33:13,532 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=5e91f676554be63c9f656bc420de8a2a, regionState=OPENING, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:33:13,534 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; OpenRegionProcedure 5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493}] 2024-11-12T19:33:13,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-12T19:33:13,687 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:13,692 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(135): Open 
TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:13,692 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7285): Opening region: {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} 2024-11-12T19:33:13,693 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:13,693 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:33:13,693 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7327): checking encryption for 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:13,693 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7330): checking classloading for 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:13,695 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:13,699 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:33:13,700 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5e91f676554be63c9f656bc420de8a2a columnFamilyName A 2024-11-12T19:33:13,700 DEBUG [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:13,702 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] regionserver.HStore(327): Store=5e91f676554be63c9f656bc420de8a2a/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:33:13,702 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:13,708 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:33:13,709 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5e91f676554be63c9f656bc420de8a2a columnFamilyName B 2024-11-12T19:33:13,709 DEBUG [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:13,713 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] regionserver.HStore(327): Store=5e91f676554be63c9f656bc420de8a2a/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:33:13,714 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:13,723 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:33:13,723 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5e91f676554be63c9f656bc420de8a2a columnFamilyName C 2024-11-12T19:33:13,723 DEBUG [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:13,730 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] regionserver.HStore(327): 
Store=5e91f676554be63c9f656bc420de8a2a/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:33:13,730 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:13,731 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:13,732 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:13,734 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-12T19:33:13,736 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1085): writing seq id for 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:13,740 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T19:33:13,741 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1102): Opened 5e91f676554be63c9f656bc420de8a2a; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72850753, jitterRate=0.08556081354618073}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T19:33:13,742 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1001): Region open journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:13,743 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., pid=37, masterSystemTime=1731439993687 2024-11-12T19:33:13,745 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:13,746 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
2024-11-12T19:33:13,746 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=5e91f676554be63c9f656bc420de8a2a, regionState=OPEN, openSeqNum=2, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:33:13,752 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-12T19:33:13,752 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; OpenRegionProcedure 5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 in 214 msec 2024-11-12T19:33:13,757 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-11-12T19:33:13,758 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e91f676554be63c9f656bc420de8a2a, ASSIGN in 374 msec 2024-11-12T19:33:13,763 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T19:33:13,763 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731439993763"}]},"ts":"1731439993763"} 2024-11-12T19:33:13,769 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-12T19:33:13,800 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T19:33:13,804 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 541 msec 2024-11-12T19:33:13,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-12T19:33:13,868 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 35 completed 2024-11-12T19:33:13,870 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1df308e2 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a86f6a4 2024-11-12T19:33:13,912 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c267206, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:13,916 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:13,919 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50482, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:13,935 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T19:33:13,938 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60142, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-12T19:33:13,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-12T19:33:13,951 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.3 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T19:33:13,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-12T19:33:14,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741925_1101 (size=996) 2024-11-12T19:33:14,437 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-12T19:33:14,437 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-12T19:33:14,442 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-12T19:33:14,453 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e91f676554be63c9f656bc420de8a2a, REOPEN/MOVE}] 2024-11-12T19:33:14,454 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e91f676554be63c9f656bc420de8a2a, REOPEN/MOVE 2024-11-12T19:33:14,459 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=5e91f676554be63c9f656bc420de8a2a, regionState=CLOSING, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:33:14,461 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-12T19:33:14,461 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; CloseRegionProcedure 5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493}] 2024-11-12T19:33:14,613 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:14,614 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(124): Close 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:14,614 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-12T19:33:14,614 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1681): Closing 5e91f676554be63c9f656bc420de8a2a, disabling compactions & flushes 2024-11-12T19:33:14,614 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:14,614 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:14,614 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. after waiting 0 ms 2024-11-12T19:33:14,614 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
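
Editor's note: the modify request logged at 19:33:13,951 switches column family 'A' to MOB storage with MOB_THRESHOLD => '4', and the entries that follow record the resulting ReopenTableRegionsProcedure (close then reopen of region 5e91f676554be63c9f656bc420de8a2a). A minimal sketch of the equivalent client call, assuming the Admin handle from the sketch above; this is not the test's own code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  // Assumes an open Admin handle, e.g. from the createConnection() sketch above.
  static void enableMob(Admin admin) throws Exception {
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(tn);
    ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
    ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
        .setMobEnabled(true)   // IS_MOB => 'true'
        .setMobThreshold(4L)   // MOB_THRESHOLD => '4': cells larger than 4 bytes go to MOB files
        .build();
    // modifyTable stores the new descriptor and reopens the table's regions,
    // which is what the CloseRegionProcedure/OpenRegionProcedure pair above records.
    admin.modifyTable(TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(mobA)
        .build());
  }
}
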
2024-11-12T19:33:14,618 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-12T19:33:14,619 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:14,619 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1635): Region close journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:14,619 WARN [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegionServer(3786): Not adding moved region record: 5e91f676554be63c9f656bc420de8a2a to self. 2024-11-12T19:33:14,621 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(170): Closed 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:14,622 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=5e91f676554be63c9f656bc420de8a2a, regionState=CLOSED 2024-11-12T19:33:14,625 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-12T19:33:14,626 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; CloseRegionProcedure 5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 in 162 msec 2024-11-12T19:33:14,626 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e91f676554be63c9f656bc420de8a2a, REOPEN/MOVE; state=CLOSED, location=81d69e608036,33067,1731439956493; forceNewPlan=false, retain=true 2024-11-12T19:33:14,776 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=5e91f676554be63c9f656bc420de8a2a, regionState=OPENING, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:33:14,778 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE; OpenRegionProcedure 5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493}] 2024-11-12T19:33:14,930 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:14,934 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
2024-11-12T19:33:14,934 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7285): Opening region: {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} 2024-11-12T19:33:14,935 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:14,935 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:33:14,935 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7327): checking encryption for 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:14,935 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7330): checking classloading for 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:14,938 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:14,939 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:33:14,946 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5e91f676554be63c9f656bc420de8a2a columnFamilyName A 2024-11-12T19:33:14,949 DEBUG [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:14,949 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] regionserver.HStore(327): Store=5e91f676554be63c9f656bc420de8a2a/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:33:14,950 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:14,951 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:33:14,951 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5e91f676554be63c9f656bc420de8a2a columnFamilyName B 2024-11-12T19:33:14,951 DEBUG [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:14,952 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] regionserver.HStore(327): Store=5e91f676554be63c9f656bc420de8a2a/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:33:14,952 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:14,953 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:33:14,954 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5e91f676554be63c9f656bc420de8a2a columnFamilyName C 2024-11-12T19:33:14,954 DEBUG [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:14,955 INFO [StoreOpener-5e91f676554be63c9f656bc420de8a2a-1 {}] regionserver.HStore(327): Store=5e91f676554be63c9f656bc420de8a2a/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:33:14,955 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:14,956 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:14,958 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:14,960 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-12T19:33:14,962 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1085): writing seq id for 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:14,964 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1102): Opened 5e91f676554be63c9f656bc420de8a2a; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62743727, jitterRate=-0.06504561007022858}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T19:33:14,966 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1001): Region open journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:14,967 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., pid=42, masterSystemTime=1731439994930 2024-11-12T19:33:14,970 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=5e91f676554be63c9f656bc420de8a2a, regionState=OPEN, openSeqNum=5, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:33:14,970 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:14,971 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
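
Editor's note: the region is now reopened (openSeqNum=5) with the MOB-enabled family 'A', and the entries that follow show the test opening a series of client connections before the memstores start filling. The ResourceChecker header names the test as testMobMixedAtomicity, whose point is that a single-row mutation spanning all three families is applied atomically. The following is a hypothetical sketch of that kind of multi-family write, with made-up row and column names; it is not taken from the test source.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiFamilyWriter {
  public static void main(String[] args) throws Exception {
    byte[][] families = {Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C")};
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));                 // hypothetical row key
      byte[] value = Bytes.toBytes("payload-larger-than-4-bytes");    // exceeds the 4-byte MOB threshold on 'A'
      for (byte[] family : families) {
        put.addColumn(family, Bytes.toBytes("col0"), value);          // hypothetical qualifier
      }
      // All three families of this row are updated in one atomic row mutation.
      table.put(put);
    }
  }
}
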
2024-11-12T19:33:14,975 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=40 2024-11-12T19:33:14,976 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=40, state=SUCCESS; OpenRegionProcedure 5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 in 195 msec 2024-11-12T19:33:14,980 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-11-12T19:33:14,980 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e91f676554be63c9f656bc420de8a2a, REOPEN/MOVE in 523 msec 2024-11-12T19:33:14,999 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-11-12T19:33:14,999 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 543 msec 2024-11-12T19:33:15,003 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 1.0480 sec 2024-11-12T19:33:15,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-12T19:33:15,015 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ebe01f4 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@40c21799 2024-11-12T19:33:15,063 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a0aa7d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:15,065 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x515fd839 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@87b269f 2024-11-12T19:33:15,111 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@231f064, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:15,114 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x57449e06 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7f4859f4 2024-11-12T19:33:15,139 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ecfd53a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:15,141 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x35b51e5d to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1eb823f7 2024-11-12T19:33:15,178 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d6eb994, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:15,181 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x74be9bc0 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@24ebde20 2024-11-12T19:33:15,202 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c517130, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:15,205 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x40832d66 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@776c0cb7 2024-11-12T19:33:15,256 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@555bfdff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:15,258 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x29dad7a8 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3ec46f90 2024-11-12T19:33:15,284 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@347ad9b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:15,286 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62c6fdab to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7f63b68c 2024-11-12T19:33:15,323 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d36579b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:15,325 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x250a1de4 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@473f181f 2024-11-12T19:33:15,357 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@681a05ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:15,387 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:33:15,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] 
procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees 2024-11-12T19:33:15,395 DEBUG [hconnection-0x5436e126-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:15,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-12T19:33:15,396 DEBUG [hconnection-0x615c40c9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:15,397 DEBUG [hconnection-0x6c5ee15f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:15,398 DEBUG [hconnection-0x199dd65f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:15,399 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:33:15,403 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:33:15,403 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:33:15,403 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50514, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:15,403 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50530, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:15,404 DEBUG [hconnection-0x5934e12e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:15,406 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50498, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:15,408 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50532, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:15,415 DEBUG [hconnection-0x5092a64e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:15,419 DEBUG [hconnection-0x4938e315-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:15,420 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50552, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:15,421 DEBUG [hconnection-0x8ea70a3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:15,422 INFO 
[RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50542, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:15,423 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50580, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:15,425 DEBUG [hconnection-0x52827763-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:15,425 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50564, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:15,429 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50590, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:15,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:15,481 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5e91f676554be63c9f656bc420de8a2a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-12T19:33:15,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=A 2024-11-12T19:33:15,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:15,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=B 2024-11-12T19:33:15,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:15,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=C 2024-11-12T19:33:15,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:15,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-12T19:33:15,567 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:15,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-12T19:33:15,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:15,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:15,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
2024-11-12T19:33:15,571 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:15,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:15,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:15,582 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:15,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440055572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:15,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:15,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440055573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:15,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:15,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440055578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:15,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:15,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440055578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:15,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:15,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50590 deadline: 1731440055596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:15,615 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112169b1d3ae0714ee9a2f0a4af6251d3a7_5e91f676554be63c9f656bc420de8a2a is 50, key is test_row_0/A:col10/1731439995478/Put/seqid=0 2024-11-12T19:33:15,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741926_1102 (size=12154) 2024-11-12T19:33:15,651 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:15,658 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112169b1d3ae0714ee9a2f0a4af6251d3a7_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112169b1d3ae0714ee9a2f0a4af6251d3a7_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:15,660 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/cd3328313c2f4cdf8c257b2dd31e6c66, store: [table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:15,669 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/cd3328313c2f4cdf8c257b2dd31e6c66 is 175, key is test_row_0/A:col10/1731439995478/Put/seqid=0 2024-11-12T19:33:15,685 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:15,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440055685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:15,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741927_1103 (size=30955) 2024-11-12T19:33:15,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-12T19:33:15,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:15,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50590 deadline: 1731440055712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:15,716 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/cd3328313c2f4cdf8c257b2dd31e6c66 2024-11-12T19:33:15,730 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:15,730 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-12T19:33:15,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:15,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:15,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:15,731 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:15,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:15,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:15,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:15,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440055725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:15,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:15,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440055725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:15,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:15,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440055725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:15,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/e0af73c0ae884f2ba846b2904f7466d1 is 50, key is test_row_0/B:col10/1731439995478/Put/seqid=0 2024-11-12T19:33:15,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741928_1104 (size=12001) 2024-11-12T19:33:15,833 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/e0af73c0ae884f2ba846b2904f7466d1 2024-11-12T19:33:15,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/cb5baac74bb7420b8d028d1805820498 is 50, key is test_row_0/C:col10/1731439995478/Put/seqid=0 2024-11-12T19:33:15,885 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:15,886 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-12T19:33:15,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:15,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:15,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
2024-11-12T19:33:15,886 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:15,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:15,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:15,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:15,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440055889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:15,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:15,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50590 deadline: 1731440055915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:15,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741929_1105 (size=12001) 2024-11-12T19:33:15,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:15,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440055935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:15,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:15,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440055935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:15,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:15,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440055936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:16,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-12T19:33:16,040 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:16,040 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-12T19:33:16,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:16,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:16,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:16,041 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:16,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:16,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:16,198 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:16,199 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-12T19:33:16,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:16,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:16,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:16,199 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:16,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:16,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:16,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:16,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440056203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:16,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:16,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50590 deadline: 1731440056227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:16,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:16,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:16,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440056248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:16,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440056249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:16,253 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:16,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440056244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:16,338 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/cb5baac74bb7420b8d028d1805820498 2024-11-12T19:33:16,352 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:16,353 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-12T19:33:16,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:16,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:16,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
2024-11-12T19:33:16,353 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:16,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:16,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:16,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/cd3328313c2f4cdf8c257b2dd31e6c66 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/cd3328313c2f4cdf8c257b2dd31e6c66 2024-11-12T19:33:16,381 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/cd3328313c2f4cdf8c257b2dd31e6c66, entries=150, sequenceid=17, filesize=30.2 K 2024-11-12T19:33:16,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/e0af73c0ae884f2ba846b2904f7466d1 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/e0af73c0ae884f2ba846b2904f7466d1 2024-11-12T19:33:16,400 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/e0af73c0ae884f2ba846b2904f7466d1, entries=150, sequenceid=17, filesize=11.7 K 2024-11-12T19:33:16,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/cb5baac74bb7420b8d028d1805820498 as 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/cb5baac74bb7420b8d028d1805820498 2024-11-12T19:33:16,415 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/cb5baac74bb7420b8d028d1805820498, entries=150, sequenceid=17, filesize=11.7 K 2024-11-12T19:33:16,416 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 5e91f676554be63c9f656bc420de8a2a in 935ms, sequenceid=17, compaction requested=false 2024-11-12T19:33:16,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:16,515 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:16,515 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-12T19:33:16,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:16,516 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2837): Flushing 5e91f676554be63c9f656bc420de8a2a 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-12T19:33:16,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=A 2024-11-12T19:33:16,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:16,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=B 2024-11-12T19:33:16,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:16,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=C 2024-11-12T19:33:16,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:16,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-12T19:33:16,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411123bbef9b2e1a74c7997b95f96e6d54300_5e91f676554be63c9f656bc420de8a2a is 50, key is test_row_0/A:col10/1731439995574/Put/seqid=0 2024-11-12T19:33:16,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741930_1106 (size=12154) 2024-11-12T19:33:16,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:16,599 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411123bbef9b2e1a74c7997b95f96e6d54300_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411123bbef9b2e1a74c7997b95f96e6d54300_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:16,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/5b7d09d1d14046fcabbf71f49dba3142, store: [table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:16,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/5b7d09d1d14046fcabbf71f49dba3142 is 175, key is test_row_0/A:col10/1731439995574/Put/seqid=0 2024-11-12T19:33:16,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741931_1107 (size=30955) 2024-11-12T19:33:16,632 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/5b7d09d1d14046fcabbf71f49dba3142 2024-11-12T19:33:16,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/f3194d706a7b451e93586c6c5c2cf5ae is 50, key is test_row_0/B:col10/1731439995574/Put/seqid=0 2024-11-12T19:33:16,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741932_1108 (size=12001) 2024-11-12T19:33:16,713 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/f3194d706a7b451e93586c6c5c2cf5ae 2024-11-12T19:33:16,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:16,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:16,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/e720d9e706e7447d83d7609839ca4152 is 50, key is test_row_0/C:col10/1731439995574/Put/seqid=0 2024-11-12T19:33:16,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:16,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440056758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:16,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:16,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440056760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:16,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:16,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440056762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:16,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:16,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50590 deadline: 1731440056762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:16,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:16,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440056767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:16,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741933_1109 (size=12001) 2024-11-12T19:33:16,810 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/e720d9e706e7447d83d7609839ca4152 2024-11-12T19:33:16,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/5b7d09d1d14046fcabbf71f49dba3142 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/5b7d09d1d14046fcabbf71f49dba3142 2024-11-12T19:33:16,836 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/5b7d09d1d14046fcabbf71f49dba3142, entries=150, sequenceid=41, filesize=30.2 K 2024-11-12T19:33:16,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/f3194d706a7b451e93586c6c5c2cf5ae as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/f3194d706a7b451e93586c6c5c2cf5ae 2024-11-12T19:33:16,849 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/f3194d706a7b451e93586c6c5c2cf5ae, entries=150, sequenceid=41, filesize=11.7 K 2024-11-12T19:33:16,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/e720d9e706e7447d83d7609839ca4152 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/e720d9e706e7447d83d7609839ca4152 2024-11-12T19:33:16,867 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/e720d9e706e7447d83d7609839ca4152, entries=150, sequenceid=41, filesize=11.7 K 2024-11-12T19:33:16,871 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 5e91f676554be63c9f656bc420de8a2a in 355ms, sequenceid=41, compaction requested=false 2024-11-12T19:33:16,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2538): Flush status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:16,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:16,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=44 2024-11-12T19:33:16,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=44 2024-11-12T19:33:16,879 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5e91f676554be63c9f656bc420de8a2a 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-12T19:33:16,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=A 2024-11-12T19:33:16,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:16,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=B 2024-11-12T19:33:16,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:16,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=C 2024-11-12T19:33:16,880 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-11-12T19:33:16,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:16,880 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4720 sec 2024-11-12T19:33:16,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 5e91f676554be63c9f656bc420de8a2a 
2024-11-12T19:33:16,885 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees in 1.4950 sec 2024-11-12T19:33:16,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112267977279bd4401eabc1b8acf7240731_5e91f676554be63c9f656bc420de8a2a is 50, key is test_row_0/A:col10/1731439996875/Put/seqid=0 2024-11-12T19:33:16,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741934_1110 (size=14594) 2024-11-12T19:33:16,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:16,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440056983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:16,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:16,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440056987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:16,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:16,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50590 deadline: 1731440056991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:17,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:17,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440057093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:17,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:17,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440057093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:17,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:17,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50590 deadline: 1731440057095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:17,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:17,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440057303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:17,313 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:17,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440057307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:17,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:17,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50590 deadline: 1731440057315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:17,387 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:17,408 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112267977279bd4401eabc1b8acf7240731_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112267977279bd4401eabc1b8acf7240731_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:17,419 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/2b76a4fc08894df7af9b6581fb403c1f, store: [table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:17,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/2b76a4fc08894df7af9b6581fb403c1f is 175, key is test_row_0/A:col10/1731439996875/Put/seqid=0 2024-11-12T19:33:17,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741935_1111 (size=39549) 2024-11-12T19:33:17,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-12T19:33:17,518 INFO [Thread-538 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 43 completed 2024-11-12T19:33:17,530 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush 
TestAcidGuarantees 2024-11-12T19:33:17,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-11-12T19:33:17,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-12T19:33:17,533 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:33:17,537 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:33:17,537 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:33:17,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:17,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440057609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:17,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:17,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440057615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:17,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-12T19:33:17,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:17,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50590 deadline: 1731440057629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:17,694 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:17,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-12T19:33:17,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:17,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:17,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:17,695 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
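The repeated RegionTooBusyException records above show the region rejecting new Mutate calls while its memstore is over the 512.0 K limit and a flush is still in progress (the FlushRegionCallable itself reports "NOT flushing ... as already flushing"). A minimal client-side sketch of how a writer can back off on that exception, assuming the TestAcidGuarantees table from this log, family A and qualifier col10 as in the flushed cells, and an illustrative 100 ms starting delay; the stock HBase client normally performs comparable retries internally, so this only makes the backoff explicit:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100; // assumed starting delay, doubled on each rejection
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);        // accepted once the memstore drains below the limit
          return;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs); // region is flushing; wait instead of re-submitting immediately
          backoffMs *= 2;
        }
      }
      throw new IOException("region stayed over its memstore limit after retries");
    }
  }
}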
2024-11-12T19:33:17,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:17,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:17,765 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-12T19:33:17,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:17,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440057775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:17,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:17,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440057788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:17,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-12T19:33:17,848 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:17,848 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-12T19:33:17,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:17,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:17,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:17,849 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
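The 512.0 K figure in these RegionTooBusyException messages is the region's blocking threshold: HRegion.checkResources rejects writes once the memstore exceeds the configured flush size multiplied by the block multiplier. A sketch of the two settings involved, with illustrative values that would yield a 512 K limit; the test's actual configuration is not shown in this log, so treat the numbers as assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a memstore once it reaches 128 K of data (assumed value, not taken from this log).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Reject writes with RegionTooBusyException once the memstore passes 4 x flush size = 512 K.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
  }
}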
2024-11-12T19:33:17,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:17,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:17,895 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/2b76a4fc08894df7af9b6581fb403c1f 2024-11-12T19:33:17,929 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/f26248b16c5a409f93f106d1e9d16768 is 50, key is test_row_0/B:col10/1731439996875/Put/seqid=0 2024-11-12T19:33:17,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741936_1112 (size=12001) 2024-11-12T19:33:17,968 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/f26248b16c5a409f93f106d1e9d16768 2024-11-12T19:33:17,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/4537c2580ee3433b9edb47c0c854a75f is 50, key is test_row_0/C:col10/1731439996875/Put/seqid=0 2024-11-12T19:33:18,007 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:18,007 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-12T19:33:18,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:18,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:18,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:18,008 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:18,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:18,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:18,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741937_1113 (size=12001) 2024-11-12T19:33:18,030 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/4537c2580ee3433b9edb47c0c854a75f 2024-11-12T19:33:18,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/2b76a4fc08894df7af9b6581fb403c1f as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/2b76a4fc08894df7af9b6581fb403c1f 2024-11-12T19:33:18,087 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/2b76a4fc08894df7af9b6581fb403c1f, entries=200, sequenceid=54, filesize=38.6 K 2024-11-12T19:33:18,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/f26248b16c5a409f93f106d1e9d16768 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/f26248b16c5a409f93f106d1e9d16768 2024-11-12T19:33:18,119 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/f26248b16c5a409f93f106d1e9d16768, entries=150, sequenceid=54, filesize=11.7 K 2024-11-12T19:33:18,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/4537c2580ee3433b9edb47c0c854a75f as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/4537c2580ee3433b9edb47c0c854a75f 2024-11-12T19:33:18,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:18,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440058127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:18,136 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:18,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440058127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:18,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-12T19:33:18,150 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/4537c2580ee3433b9edb47c0c854a75f, entries=150, sequenceid=54, filesize=11.7 K 2024-11-12T19:33:18,163 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 5e91f676554be63c9f656bc420de8a2a in 1284ms, sequenceid=54, compaction requested=true 2024-11-12T19:33:18,163 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:18,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:33:18,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:18,163 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:18,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:33:18,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:18,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:33:18,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-12T19:33:18,164 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
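Once the flush above finishes (sequenceid=54, ~67 KB written across A, B and C), the flusher marks all three stores for compaction and the FLUSH operation issued earlier through HBaseAdmin can complete. A minimal sketch of the corresponding administrative calls, reusing the connection setup from the first sketch; whether flush blocks until the procedure finishes depends on the client version, so this is an assumption-level illustration rather than the test's exact code:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompact {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      admin.flush(tn);   // master drives a FlushTableProcedure, as with procId 43/45 above
      admin.compact(tn); // asks the region server to queue minor compactions for the stores
    }
  }
}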
2024-11-12T19:33:18,167 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:18,167 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/A is initiating minor compaction (all files) 2024-11-12T19:33:18,167 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/A in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:18,167 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/cd3328313c2f4cdf8c257b2dd31e6c66, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/5b7d09d1d14046fcabbf71f49dba3142, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/2b76a4fc08894df7af9b6581fb403c1f] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=99.1 K 2024-11-12T19:33:18,167 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:18,168 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/cd3328313c2f4cdf8c257b2dd31e6c66, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/5b7d09d1d14046fcabbf71f49dba3142, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/2b76a4fc08894df7af9b6581fb403c1f] 2024-11-12T19:33:18,171 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting cd3328313c2f4cdf8c257b2dd31e6c66, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1731439995471 2024-11-12T19:33:18,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:18,172 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b7d09d1d14046fcabbf71f49dba3142, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1731439995571 2024-11-12T19:33:18,173 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b76a4fc08894df7af9b6581fb403c1f, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1731439996756 2024-11-12T19:33:18,175 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5e91f676554be63c9f656bc420de8a2a 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-12T19:33:18,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=A 2024-11-12T19:33:18,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:18,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=B 2024-11-12T19:33:18,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:18,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=C 2024-11-12T19:33:18,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:18,176 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:18,176 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/B is initiating minor compaction (all files) 2024-11-12T19:33:18,176 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/B in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
2024-11-12T19:33:18,176 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/e0af73c0ae884f2ba846b2904f7466d1, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/f3194d706a7b451e93586c6c5c2cf5ae, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/f26248b16c5a409f93f106d1e9d16768] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=35.2 K 2024-11-12T19:33:18,180 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:18,181 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0af73c0ae884f2ba846b2904f7466d1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1731439995471 2024-11-12T19:33:18,185 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-12T19:33:18,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:18,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:18,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:18,185 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:18,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:18,187 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3194d706a7b451e93586c6c5c2cf5ae, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1731439995571 2024-11-12T19:33:18,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:18,190 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting f26248b16c5a409f93f106d1e9d16768, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1731439996756 2024-11-12T19:33:18,196 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:18,204 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241112c4fd4bc9d68e49a8bc12a677c452ec04_5e91f676554be63c9f656bc420de8a2a store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:18,213 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241112c4fd4bc9d68e49a8bc12a677c452ec04_5e91f676554be63c9f656bc420de8a2a, store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:18,213 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112c4fd4bc9d68e49a8bc12a677c452ec04_5e91f676554be63c9f656bc420de8a2a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:18,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111282b8e113479649f6bb8f8baa57ee6f40_5e91f676554be63c9f656bc420de8a2a is 50, key is test_row_0/A:col10/1731439996982/Put/seqid=0 2024-11-12T19:33:18,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741938_1114 (size=12154) 2024-11-12T19:33:18,270 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#B#compaction#95 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:18,270 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/02fb4b8ce7e2465eb24bb3dd139b195b is 50, key is test_row_0/B:col10/1731439996875/Put/seqid=0 2024-11-12T19:33:18,270 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:18,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50590 deadline: 1731440058268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:18,271 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:18,279 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111282b8e113479649f6bb8f8baa57ee6f40_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111282b8e113479649f6bb8f8baa57ee6f40_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:18,284 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/105d535f518743ec9c1f82ba6a61780e, store: [table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:18,285 DEBUG [MemStoreFlusher.0 
{}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/105d535f518743ec9c1f82ba6a61780e is 175, key is test_row_0/A:col10/1731439996982/Put/seqid=0 2024-11-12T19:33:18,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741939_1115 (size=4469) 2024-11-12T19:33:18,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741940_1116 (size=12104) 2024-11-12T19:33:18,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741941_1117 (size=30955) 2024-11-12T19:33:18,361 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:18,361 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/105d535f518743ec9c1f82ba6a61780e 2024-11-12T19:33:18,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-12T19:33:18,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:18,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:18,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:18,367 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
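The repeated "Unable to complete flush ... as already flushing" errors above come from the master's flush procedure (pid=46) being declined because a memstore-pressure flush of the same region is still in progress, after which the master re-dispatches the callable. As a hedged sketch only (not part of the log, and it is not shown in the log whether the test's flush procedure originated this way), assuming an HBase 2.x Java client, a table flush can be requested from the client side like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequest {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Request that every region of the table flush its memstores to store files.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }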
2024-11-12T19:33:18,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:18,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:18,378 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:18,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50590 deadline: 1731440058375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:18,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/231b1e419b4a41fe8a385ce77841069d is 50, key is test_row_0/B:col10/1731439996982/Put/seqid=0 2024-11-12T19:33:18,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741942_1118 (size=12001) 2024-11-12T19:33:18,522 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:18,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-12T19:33:18,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:18,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
as already flushing 2024-11-12T19:33:18,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:18,523 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:18,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:18,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:18,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:18,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50590 deadline: 1731440058585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:18,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-12T19:33:18,683 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:18,683 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-12T19:33:18,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:18,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:18,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:18,685 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
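The RegionTooBusyException entries above show Mutate calls being rejected with "Over memstore limit=512.0 K" while the flush and compactions catch up; each rejection is logged with its callId and deadline. As a hedged sketch only (not from the log or the test code), assuming an HBase 2.x Java client, an application-level backoff around a single put might look like the following; note that the client library performs its own internal retries before a failure reaches application code:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPut {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Row, family and qualifier mirror the test data seen in the log.
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                long backoffMs = 100;
                IOException last = null;
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        table.put(put);
                        return; // write accepted
                    } catch (IOException ioe) {
                        // Once the client's own retries are exhausted, an overloaded region
                        // (e.g. RegionTooBusyException) surfaces here; back off and try again.
                        last = ioe;
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;
                    }
                }
                throw last;
            }
        }
    }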
2024-11-12T19:33:18,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:18,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:18,711 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#A#compaction#93 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:18,713 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/62c706cf63fc4316aa7d255bac8ce29c is 175, key is test_row_0/A:col10/1731439996875/Put/seqid=0 2024-11-12T19:33:18,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741943_1119 (size=31058) 2024-11-12T19:33:18,761 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/02fb4b8ce7e2465eb24bb3dd139b195b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/02fb4b8ce7e2465eb24bb3dd139b195b 2024-11-12T19:33:18,775 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/B of 5e91f676554be63c9f656bc420de8a2a into 02fb4b8ce7e2465eb24bb3dd139b195b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:33:18,775 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:18,775 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/B, priority=13, startTime=1731439998163; duration=0sec 2024-11-12T19:33:18,776 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:18,776 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:B 2024-11-12T19:33:18,776 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:18,778 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:18,779 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/C is initiating minor compaction (all files) 2024-11-12T19:33:18,779 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/C in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:18,779 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/cb5baac74bb7420b8d028d1805820498, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/e720d9e706e7447d83d7609839ca4152, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/4537c2580ee3433b9edb47c0c854a75f] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=35.2 K 2024-11-12T19:33:18,780 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb5baac74bb7420b8d028d1805820498, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1731439995471 2024-11-12T19:33:18,780 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting e720d9e706e7447d83d7609839ca4152, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1731439995571 2024-11-12T19:33:18,781 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4537c2580ee3433b9edb47c0c854a75f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1731439996756 2024-11-12T19:33:18,813 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#C#compaction#97 average throughput is 0.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:18,814 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/d3d9c4b141af4835a56a7113f0c3e551 is 50, key is test_row_0/C:col10/1731439996875/Put/seqid=0 2024-11-12T19:33:18,839 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/231b1e419b4a41fe8a385ce77841069d 2024-11-12T19:33:18,843 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:18,849 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-12T19:33:18,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:18,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:18,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:18,849 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:18,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:18,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:18,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741944_1120 (size=12104) 2024-11-12T19:33:18,881 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/9e1540695c8a423984bfdee88715e3e3 is 50, key is test_row_0/C:col10/1731439996982/Put/seqid=0 2024-11-12T19:33:18,882 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/d3d9c4b141af4835a56a7113f0c3e551 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/d3d9c4b141af4835a56a7113f0c3e551 2024-11-12T19:33:18,895 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/C of 5e91f676554be63c9f656bc420de8a2a into d3d9c4b141af4835a56a7113f0c3e551(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:18,896 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:18,896 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/C, priority=13, startTime=1731439998163; duration=0sec 2024-11-12T19:33:18,896 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:18,896 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:C 2024-11-12T19:33:18,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:18,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50590 deadline: 1731440058895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:18,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741945_1121 (size=12001) 2024-11-12T19:33:18,918 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/9e1540695c8a423984bfdee88715e3e3 2024-11-12T19:33:18,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/105d535f518743ec9c1f82ba6a61780e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/105d535f518743ec9c1f82ba6a61780e 2024-11-12T19:33:18,982 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/105d535f518743ec9c1f82ba6a61780e, entries=150, sequenceid=78, filesize=30.2 K 2024-11-12T19:33:18,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/231b1e419b4a41fe8a385ce77841069d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/231b1e419b4a41fe8a385ce77841069d 2024-11-12T19:33:19,019 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:19,019 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-12T19:33:19,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
2024-11-12T19:33:19,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:19,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:19,020 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:19,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:19,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:19,023 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/231b1e419b4a41fe8a385ce77841069d, entries=150, sequenceid=78, filesize=11.7 K 2024-11-12T19:33:19,034 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/9e1540695c8a423984bfdee88715e3e3 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/9e1540695c8a423984bfdee88715e3e3 2024-11-12T19:33:19,073 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/9e1540695c8a423984bfdee88715e3e3, entries=150, sequenceid=78, filesize=11.7 K 2024-11-12T19:33:19,086 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 5e91f676554be63c9f656bc420de8a2a in 910ms, sequenceid=78, compaction requested=false 2024-11-12T19:33:19,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:19,145 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5e91f676554be63c9f656bc420de8a2a 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-12T19:33:19,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=A 2024-11-12T19:33:19,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:19,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=B 2024-11-12T19:33:19,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:19,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=C 2024-11-12T19:33:19,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:19,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:19,156 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/62c706cf63fc4316aa7d255bac8ce29c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/62c706cf63fc4316aa7d255bac8ce29c 2024-11-12T19:33:19,169 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411125f5857b2a4f1409f83ade1a8406a96e7_5e91f676554be63c9f656bc420de8a2a is 50, key is test_row_0/A:col10/1731439999143/Put/seqid=0 2024-11-12T19:33:19,177 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/A of 5e91f676554be63c9f656bc420de8a2a into 62c706cf63fc4316aa7d255bac8ce29c(size=30.3 K), total size for store is 60.6 K. This selection was in queue for 0sec, and took 1sec to execute. 2024-11-12T19:33:19,177 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:19,177 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/A, priority=13, startTime=1731439998163; duration=1sec 2024-11-12T19:33:19,177 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:19,177 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:A 2024-11-12T19:33:19,187 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:19,191 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-12T19:33:19,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:19,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:19,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:19,191 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:19,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:19,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:19,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741946_1122 (size=14594) 2024-11-12T19:33:19,231 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:19,242 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411125f5857b2a4f1409f83ade1a8406a96e7_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411125f5857b2a4f1409f83ade1a8406a96e7_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:19,244 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/0c9f05a1fb54489e999a668375f7cc59, store: [table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:19,245 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/0c9f05a1fb54489e999a668375f7cc59 is 175, key is test_row_0/A:col10/1731439999143/Put/seqid=0 2024-11-12T19:33:19,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741947_1123 (size=39549) 2024-11-12T19:33:19,286 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/0c9f05a1fb54489e999a668375f7cc59 2024-11-12T19:33:19,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:19,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440059286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:19,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:19,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440059295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:19,308 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/87d9f8fc0c9f425782a7ce65e565fe77 is 50, key is test_row_0/B:col10/1731439999143/Put/seqid=0 2024-11-12T19:33:19,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741948_1124 (size=12001) 2024-11-12T19:33:19,333 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/87d9f8fc0c9f425782a7ce65e565fe77 2024-11-12T19:33:19,356 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:19,356 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-12T19:33:19,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:19,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:19,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:19,357 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:19,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:19,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:19,360 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/4115cce62ced4873ae7e8cf2de23598c is 50, key is test_row_0/C:col10/1731439999143/Put/seqid=0 2024-11-12T19:33:19,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741949_1125 (size=12001) 2024-11-12T19:33:19,405 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/4115cce62ced4873ae7e8cf2de23598c 2024-11-12T19:33:19,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:19,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440059400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:19,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:19,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50590 deadline: 1731440059401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:19,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:19,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440059402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:19,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/0c9f05a1fb54489e999a668375f7cc59 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/0c9f05a1fb54489e999a668375f7cc59 2024-11-12T19:33:19,421 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/0c9f05a1fb54489e999a668375f7cc59, entries=200, sequenceid=93, filesize=38.6 K 2024-11-12T19:33:19,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/87d9f8fc0c9f425782a7ce65e565fe77 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/87d9f8fc0c9f425782a7ce65e565fe77 2024-11-12T19:33:19,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/87d9f8fc0c9f425782a7ce65e565fe77, entries=150, sequenceid=93, filesize=11.7 K 2024-11-12T19:33:19,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/4115cce62ced4873ae7e8cf2de23598c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/4115cce62ced4873ae7e8cf2de23598c 2024-11-12T19:33:19,457 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/4115cce62ced4873ae7e8cf2de23598c, entries=150, sequenceid=93, filesize=11.7 K 2024-11-12T19:33:19,459 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 5e91f676554be63c9f656bc420de8a2a in 313ms, sequenceid=93, compaction requested=true 2024-11-12T19:33:19,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:19,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:33:19,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:19,459 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:19,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:33:19,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:19,459 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:19,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:33:19,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:19,461 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:19,461 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/B is initiating minor compaction (all files) 2024-11-12T19:33:19,461 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/B in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
2024-11-12T19:33:19,461 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/02fb4b8ce7e2465eb24bb3dd139b195b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/231b1e419b4a41fe8a385ce77841069d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/87d9f8fc0c9f425782a7ce65e565fe77] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=35.3 K 2024-11-12T19:33:19,461 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:19,461 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/A is initiating minor compaction (all files) 2024-11-12T19:33:19,461 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/A in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:19,461 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/62c706cf63fc4316aa7d255bac8ce29c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/105d535f518743ec9c1f82ba6a61780e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/0c9f05a1fb54489e999a668375f7cc59] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=99.2 K 2024-11-12T19:33:19,462 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:19,462 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/62c706cf63fc4316aa7d255bac8ce29c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/105d535f518743ec9c1f82ba6a61780e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/0c9f05a1fb54489e999a668375f7cc59] 2024-11-12T19:33:19,464 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 62c706cf63fc4316aa7d255bac8ce29c, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1731439996756 2024-11-12T19:33:19,465 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02fb4b8ce7e2465eb24bb3dd139b195b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1731439996756 2024-11-12T19:33:19,465 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 105d535f518743ec9c1f82ba6a61780e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1731439996976 2024-11-12T19:33:19,466 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 231b1e419b4a41fe8a385ce77841069d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1731439996976 2024-11-12T19:33:19,466 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c9f05a1fb54489e999a668375f7cc59, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1731439998236 2024-11-12T19:33:19,466 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87d9f8fc0c9f425782a7ce65e565fe77, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1731439998236 2024-11-12T19:33:19,488 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:19,493 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#B#compaction#103 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:19,493 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/5eb0d4d84ce940878f61118236ee29e3 is 50, key is test_row_0/B:col10/1731439999143/Put/seqid=0 2024-11-12T19:33:19,494 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241112c222e2a1671d4489a7371d1028107834_5e91f676554be63c9f656bc420de8a2a store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:19,498 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241112c222e2a1671d4489a7371d1028107834_5e91f676554be63c9f656bc420de8a2a, store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:19,498 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112c222e2a1671d4489a7371d1028107834_5e91f676554be63c9f656bc420de8a2a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:19,511 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:19,512 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-12T19:33:19,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
2024-11-12T19:33:19,512 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 5e91f676554be63c9f656bc420de8a2a 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-12T19:33:19,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=A 2024-11-12T19:33:19,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:19,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=B 2024-11-12T19:33:19,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:19,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=C 2024-11-12T19:33:19,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:19,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741950_1126 (size=12207) 2024-11-12T19:33:19,573 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/5eb0d4d84ce940878f61118236ee29e3 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/5eb0d4d84ce940878f61118236ee29e3 2024-11-12T19:33:19,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741951_1127 (size=4469) 2024-11-12T19:33:19,578 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#A#compaction#102 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:19,578 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/9dcd38f2a2844538a3922838a626476b is 175, key is test_row_0/A:col10/1731439999143/Put/seqid=0 2024-11-12T19:33:19,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112bec19271f228402682cc5f723982ec7d_5e91f676554be63c9f656bc420de8a2a is 50, key is test_row_0/A:col10/1731439999254/Put/seqid=0 2024-11-12T19:33:19,587 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/B of 5e91f676554be63c9f656bc420de8a2a into 5eb0d4d84ce940878f61118236ee29e3(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:19,587 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:19,587 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/B, priority=13, startTime=1731439999459; duration=0sec 2024-11-12T19:33:19,593 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:19,593 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:B 2024-11-12T19:33:19,593 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:19,594 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:19,595 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/C is initiating minor compaction (all files) 2024-11-12T19:33:19,595 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/C in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
2024-11-12T19:33:19,595 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/d3d9c4b141af4835a56a7113f0c3e551, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/9e1540695c8a423984bfdee88715e3e3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/4115cce62ced4873ae7e8cf2de23598c] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=35.3 K 2024-11-12T19:33:19,596 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3d9c4b141af4835a56a7113f0c3e551, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1731439996756 2024-11-12T19:33:19,596 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e1540695c8a423984bfdee88715e3e3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1731439996976 2024-11-12T19:33:19,597 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4115cce62ced4873ae7e8cf2de23598c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1731439998236 2024-11-12T19:33:19,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:19,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:19,639 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#C#compaction#105 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:19,640 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/350157dc15404f6c986d8c091296a180 is 50, key is test_row_0/C:col10/1731439999143/Put/seqid=0 2024-11-12T19:33:19,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741953_1129 (size=12154) 2024-11-12T19:33:19,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741952_1128 (size=31161) 2024-11-12T19:33:19,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-12T19:33:19,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:19,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440059664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:19,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:19,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440059667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:19,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741954_1130 (size=12207) 2024-11-12T19:33:19,691 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/350157dc15404f6c986d8c091296a180 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/350157dc15404f6c986d8c091296a180 2024-11-12T19:33:19,718 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/C of 5e91f676554be63c9f656bc420de8a2a into 350157dc15404f6c986d8c091296a180(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:19,719 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:19,719 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/C, priority=13, startTime=1731439999459; duration=0sec 2024-11-12T19:33:19,719 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:19,719 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:C 2024-11-12T19:33:19,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:19,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440059773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:19,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:19,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440059774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:19,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:19,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440059811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:19,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:19,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440059811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:19,815 DEBUG [Thread-530 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4237 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., hostname=81d69e608036,33067,1731439956493, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:33:19,815 DEBUG [Thread-532 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4238 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., hostname=81d69e608036,33067,1731439956493, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:33:19,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:19,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440059980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:19,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:19,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440059984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:20,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:20,270 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112bec19271f228402682cc5f723982ec7d_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112bec19271f228402682cc5f723982ec7d_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:20,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/ef5393c18da74e35ab17abae0868eb9b, store: [table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:20,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/ef5393c18da74e35ab17abae0868eb9b is 175, key is test_row_0/A:col10/1731439999254/Put/seqid=0 2024-11-12T19:33:20,278 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/9dcd38f2a2844538a3922838a626476b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/9dcd38f2a2844538a3922838a626476b 2024-11-12T19:33:20,287 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/A of 5e91f676554be63c9f656bc420de8a2a into 9dcd38f2a2844538a3922838a626476b(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:20,287 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:20,287 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/A, priority=13, startTime=1731439999459; duration=0sec 2024-11-12T19:33:20,287 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:20,287 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:A 2024-11-12T19:33:20,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:20,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440060283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:20,302 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:20,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440060295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:20,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741955_1131 (size=30955) 2024-11-12T19:33:20,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:20,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50590 deadline: 1731440060418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:20,732 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/ef5393c18da74e35ab17abae0868eb9b 2024-11-12T19:33:20,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/1756c16580594f9b940e3767d208d7df is 50, key is test_row_0/B:col10/1731439999254/Put/seqid=0 2024-11-12T19:33:20,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741956_1132 (size=12001) 2024-11-12T19:33:20,780 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/1756c16580594f9b940e3767d208d7df 2024-11-12T19:33:20,794 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:20,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440060793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:20,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/8e2f633c54f140e1a0b0509544db6cda is 50, key is test_row_0/C:col10/1731439999254/Put/seqid=0 2024-11-12T19:33:20,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:20,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440060810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:20,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741957_1133 (size=12001) 2024-11-12T19:33:20,863 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/8e2f633c54f140e1a0b0509544db6cda 2024-11-12T19:33:20,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/ef5393c18da74e35ab17abae0868eb9b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/ef5393c18da74e35ab17abae0868eb9b 2024-11-12T19:33:20,890 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/ef5393c18da74e35ab17abae0868eb9b, entries=150, sequenceid=117, filesize=30.2 K 2024-11-12T19:33:20,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/1756c16580594f9b940e3767d208d7df as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/1756c16580594f9b940e3767d208d7df 2024-11-12T19:33:20,904 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/1756c16580594f9b940e3767d208d7df, entries=150, sequenceid=117, filesize=11.7 K 2024-11-12T19:33:20,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/8e2f633c54f140e1a0b0509544db6cda as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/8e2f633c54f140e1a0b0509544db6cda 2024-11-12T19:33:20,921 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/8e2f633c54f140e1a0b0509544db6cda, entries=150, sequenceid=117, filesize=11.7 K 2024-11-12T19:33:20,925 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 5e91f676554be63c9f656bc420de8a2a in 1413ms, sequenceid=117, compaction requested=false 2024-11-12T19:33:20,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:20,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:20,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-11-12T19:33:20,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-11-12T19:33:20,929 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-11-12T19:33:20,929 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3900 sec 2024-11-12T19:33:20,932 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 3.4000 sec 2024-11-12T19:33:21,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-12T19:33:21,661 INFO [Thread-538 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-11-12T19:33:21,676 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:33:21,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-11-12T19:33:21,679 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_PREPARE 2024-11-12T19:33:21,680 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:33:21,681 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:33:21,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-12T19:33:21,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-12T19:33:21,819 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5e91f676554be63c9f656bc420de8a2a 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-12T19:33:21,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=A 2024-11-12T19:33:21,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:21,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=B 2024-11-12T19:33:21,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:21,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=C 2024-11-12T19:33:21,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:21,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:21,834 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:21,835 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-12T19:33:21,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:21,836 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111279ba45bdf9cf416b87e53f583e3b79d8_5e91f676554be63c9f656bc420de8a2a is 50, key is test_row_0/A:col10/1731439999661/Put/seqid=0 2024-11-12T19:33:21,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
as already flushing 2024-11-12T19:33:21,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:21,837 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:21,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:21,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:21,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741958_1134 (size=14744) 2024-11-12T19:33:21,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:21,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440061967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:21,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-12T19:33:21,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:21,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440061977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:21,997 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:21,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-12T19:33:21,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:21,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:21,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:21,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:21,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:22,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:22,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:22,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440062082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:22,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:22,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440062107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:22,166 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:22,167 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-12T19:33:22,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:22,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:22,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:22,168 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:22,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:22,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:22,290 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:22,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-12T19:33:22,300 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111279ba45bdf9cf416b87e53f583e3b79d8_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111279ba45bdf9cf416b87e53f583e3b79d8_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:22,305 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/5db47cc4c6b04b778402e343e652aced, store: [table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:22,306 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/5db47cc4c6b04b778402e343e652aced is 175, key is test_row_0/A:col10/1731439999661/Put/seqid=0 2024-11-12T19:33:22,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:22,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440062304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:22,315 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:22,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440062314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:22,323 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:22,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-12T19:33:22,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
2024-11-12T19:33:22,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:22,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:22,331 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:22,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:22,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:22,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741959_1135 (size=39699) 2024-11-12T19:33:22,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:22,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50590 deadline: 1731440062432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:22,446 DEBUG [Thread-536 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4178 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., hostname=81d69e608036,33067,1731439956493, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:33:22,485 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:22,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-12T19:33:22,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:22,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:22,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:22,487 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:22,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:22,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:22,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:22,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440062613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:22,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:22,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440062619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:22,644 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:22,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-12T19:33:22,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
2024-11-12T19:33:22,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:22,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:22,645 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:22,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:22,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:22,748 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=134, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/5db47cc4c6b04b778402e343e652aced 2024-11-12T19:33:22,778 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/ec9f66177d2446098d038a28b1c33a58 is 50, key is test_row_0/B:col10/1731439999661/Put/seqid=0 2024-11-12T19:33:22,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-12T19:33:22,804 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:22,805 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-12T19:33:22,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:22,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:22,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:22,805 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
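The mob.DefaultMobStoreFlusher entry above indicates that at least one column family of TestAcidGuarantees is MOB-enabled, which is why this flush goes through the MOB code path before the plain B and C store files are written. As a hedged illustration only, the snippet below shows how a MOB-enabled family is declared with the standard client API; the MOB threshold value is an arbitrary assumption and not taken from the test setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Family "A" stores values above the MOB threshold in separate MOB files,
            // which routes its flushes through DefaultMobStoreFlusher as seen in the log.
            TableDescriptorBuilder table =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                        .setMobEnabled(true)
                        .setMobThreshold(100L)   // bytes; illustrative value, not the test's setting
                        .build())
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
            admin.createTable(table.build());
        }
    }
}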
2024-11-12T19:33:22,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:22,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:22,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741960_1136 (size=12151) 2024-11-12T19:33:22,835 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/ec9f66177d2446098d038a28b1c33a58 2024-11-12T19:33:22,895 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/a12fe3fecf3b45159069e046f4114eb8 is 50, key is test_row_0/C:col10/1731439999661/Put/seqid=0 2024-11-12T19:33:22,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741961_1137 (size=12151) 2024-11-12T19:33:22,961 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:22,965 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-12T19:33:22,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:22,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:22,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:22,966 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:22,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:22,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:23,124 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:23,126 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-12T19:33:23,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:23,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:23,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:23,126 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:23,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:23,127 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:23,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:23,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440063124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:23,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:23,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440063132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:23,279 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:23,280 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-12T19:33:23,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:23,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:23,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:23,283 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
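Interleaved with the failing flush procedure, the handlers on port 33067 are rejecting client mutations with RegionTooBusyException because the region's memstore has grown past its blocking limit (512.0 K here) while the flush is still in progress. The HBase client normally retries this exception internally, so it may never reach application code; assuming it does surface, the sketch below shows one way a raw writer could back off explicitly. The row, family, and qualifier names are copied from the log; the backoff loop itself is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            while (true) {
                try {
                    table.put(put);
                    break;
                } catch (RegionTooBusyException e) {
                    // "Over memstore limit" means the region is blocking writes until the
                    // in-flight flush frees memstore space; wait and try again.
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 10_000);  // capped exponential backoff
                }
            }
        }
    }
}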
2024-11-12T19:33:23,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:23,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:23,336 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/a12fe3fecf3b45159069e046f4114eb8 2024-11-12T19:33:23,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/5db47cc4c6b04b778402e343e652aced as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/5db47cc4c6b04b778402e343e652aced 2024-11-12T19:33:23,376 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/5db47cc4c6b04b778402e343e652aced, entries=200, sequenceid=134, filesize=38.8 K 2024-11-12T19:33:23,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/ec9f66177d2446098d038a28b1c33a58 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/ec9f66177d2446098d038a28b1c33a58 2024-11-12T19:33:23,388 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/ec9f66177d2446098d038a28b1c33a58, entries=150, sequenceid=134, filesize=11.9 K 2024-11-12T19:33:23,390 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/a12fe3fecf3b45159069e046f4114eb8 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/a12fe3fecf3b45159069e046f4114eb8 2024-11-12T19:33:23,406 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/a12fe3fecf3b45159069e046f4114eb8, entries=150, sequenceid=134, filesize=11.9 K 2024-11-12T19:33:23,412 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 5e91f676554be63c9f656bc420de8a2a in 1593ms, sequenceid=134, compaction requested=true 2024-11-12T19:33:23,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:23,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
5e91f676554be63c9f656bc420de8a2a:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:33:23,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:23,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:33:23,412 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:23,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:23,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:33:23,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-12T19:33:23,413 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:23,423 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101815 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:23,423 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/A is initiating minor compaction (all files) 2024-11-12T19:33:23,423 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/A in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:23,423 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/9dcd38f2a2844538a3922838a626476b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/ef5393c18da74e35ab17abae0868eb9b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/5db47cc4c6b04b778402e343e652aced] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=99.4 K 2024-11-12T19:33:23,423 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
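Once the flush completes (dataSize ~73.80 KB at sequenceid=134), the flusher marks all three stores (A, B, C) for compaction and the short/long compaction threads begin selecting files. These system-requested compactions need no user action; for reference only, a compaction of a single family can also be requested explicitly through the Admin API. The snippet below is a generic sketch using the table and family names from the log, not something the test performs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Ask for a minor compaction of family "A" only; the server-side policy
            // (SortedCompactionPolicy / ExploringCompactionPolicy in the log) still
            // decides which store files are actually compacted.
            admin.compact(table, Bytes.toBytes("A"));
            // A major compaction of the whole table would rewrite every store file:
            // admin.majorCompact(table);
        }
    }
}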
2024-11-12T19:33:23,424 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/9dcd38f2a2844538a3922838a626476b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/ef5393c18da74e35ab17abae0868eb9b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/5db47cc4c6b04b778402e343e652aced] 2024-11-12T19:33:23,428 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:23,428 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9dcd38f2a2844538a3922838a626476b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1731439998236 2024-11-12T19:33:23,428 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/B is initiating minor compaction (all files) 2024-11-12T19:33:23,428 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/B in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:23,428 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/5eb0d4d84ce940878f61118236ee29e3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/1756c16580594f9b940e3767d208d7df, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/ec9f66177d2446098d038a28b1c33a58] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=35.5 K 2024-11-12T19:33:23,428 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef5393c18da74e35ab17abae0868eb9b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1731439999254 2024-11-12T19:33:23,429 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5db47cc4c6b04b778402e343e652aced, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1731439999628 2024-11-12T19:33:23,430 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 5eb0d4d84ce940878f61118236ee29e3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1731439998236 2024-11-12T19:33:23,431 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 1756c16580594f9b940e3767d208d7df, 
keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1731439999254 2024-11-12T19:33:23,432 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting ec9f66177d2446098d038a28b1c33a58, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1731439999660 2024-11-12T19:33:23,440 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:23,441 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-12T19:33:23,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:23,443 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 5e91f676554be63c9f656bc420de8a2a 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-12T19:33:23,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=A 2024-11-12T19:33:23,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:23,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=B 2024-11-12T19:33:23,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:23,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=C 2024-11-12T19:33:23,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:23,452 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:23,471 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#B#compaction#112 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:23,472 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/0e744cd9805448b599ff9d74d531524e is 50, key is test_row_0/B:col10/1731439999661/Put/seqid=0 2024-11-12T19:33:23,477 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241112b420412c1fc349eab872c2a307f5d16b_5e91f676554be63c9f656bc420de8a2a store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:23,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411128cafaf22ebe2402a93c0f4e3d8267d13_5e91f676554be63c9f656bc420de8a2a is 50, key is test_row_0/A:col10/1731440001934/Put/seqid=0 2024-11-12T19:33:23,481 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241112b420412c1fc349eab872c2a307f5d16b_5e91f676554be63c9f656bc420de8a2a, store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:23,481 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112b420412c1fc349eab872c2a307f5d16b_5e91f676554be63c9f656bc420de8a2a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:23,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741962_1138 (size=12459) 2024-11-12T19:33:23,535 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/0e744cd9805448b599ff9d74d531524e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/0e744cd9805448b599ff9d74d531524e 2024-11-12T19:33:23,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741964_1140 (size=4469) 2024-11-12T19:33:23,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741963_1139 (size=12304) 2024-11-12T19:33:23,563 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#A#compaction#111 average throughput is 0.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:23,564 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/53d08e5ef0ad4278a67ea39ae037a01d is 175, key is test_row_0/A:col10/1731439999661/Put/seqid=0 2024-11-12T19:33:23,565 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/B of 5e91f676554be63c9f656bc420de8a2a into 0e744cd9805448b599ff9d74d531524e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:23,565 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:23,565 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/B, priority=13, startTime=1731440003412; duration=0sec 2024-11-12T19:33:23,565 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:23,565 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:B 2024-11-12T19:33:23,569 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:23,579 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:23,579 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/C is initiating minor compaction (all files) 2024-11-12T19:33:23,579 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/C in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
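The "Exploring compaction algorithm has selected 3 files of size 36359 ... 1 permutations with 1 in ratio" entries show the selection step for the B and C stores. A much-simplified sketch of the "in ratio" test that selection applies is given below: every file in a candidate set must be no larger than the configured ratio times the combined size of the other files. The real ExploringCompactionPolicy also enforces min/max file counts, size limits, and permutation scoring, so this is an illustration of the rule, not the actual implementation; the sample sizes are the three B-store files from the log.

import java.util.List;

public class CompactionRatioCheck {
    // Returns true when no file dominates the selection, i.e. each file's size is
    // at most ratio * (sum of the other selected files).
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        if (fileSizes.size() < 2) {
            return true;  // a single file is trivially "in ratio"
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // The three B-store files selected above: 12151 + 12057 + 12151 = 36359 bytes.
        List<Long> sizes = List.of(12_151L, 12_057L, 12_151L);
        // 1.2 is the usual default for hbase.hstore.compaction.ratio.
        System.out.println(filesInRatio(sizes, 1.2));
    }
}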
2024-11-12T19:33:23,579 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/350157dc15404f6c986d8c091296a180, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/8e2f633c54f140e1a0b0509544db6cda, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/a12fe3fecf3b45159069e046f4114eb8] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=35.5 K 2024-11-12T19:33:23,580 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 350157dc15404f6c986d8c091296a180, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1731439998236 2024-11-12T19:33:23,590 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e2f633c54f140e1a0b0509544db6cda, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1731439999254 2024-11-12T19:33:23,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741965_1141 (size=31413) 2024-11-12T19:33:23,595 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting a12fe3fecf3b45159069e046f4114eb8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1731439999660 2024-11-12T19:33:23,633 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/53d08e5ef0ad4278a67ea39ae037a01d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/53d08e5ef0ad4278a67ea39ae037a01d 2024-11-12T19:33:23,664 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#C#compaction#114 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:23,665 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/ff63a99cb4d6472a9eaddb6b607b9a2e is 50, key is test_row_0/C:col10/1731439999661/Put/seqid=0 2024-11-12T19:33:23,671 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/A of 5e91f676554be63c9f656bc420de8a2a into 53d08e5ef0ad4278a67ea39ae037a01d(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
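The entries around here record the B- and A-store compactions finishing: three small files are rewritten into a single output, and the temporary file is committed from .tmp into the store directory. If a caller needs to wait for such background compactions to drain (for example after requesting one explicitly), the Admin API exposes the per-table compaction state. A small polling sketch follows; the 60-second budget is an arbitrary assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WaitForCompaction {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            long deadline = System.currentTimeMillis() + 60_000;  // arbitrary 60 s budget
            // Poll until neither a minor nor a major compaction is running for the table.
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                if (System.currentTimeMillis() > deadline) {
                    throw new IllegalStateException("compaction still running after 60s");
                }
                Thread.sleep(500);
            }
        }
    }
}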
2024-11-12T19:33:23,671 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:23,671 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/A, priority=13, startTime=1731440003412; duration=0sec 2024-11-12T19:33:23,671 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:23,671 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:A 2024-11-12T19:33:23,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741966_1142 (size=12459) 2024-11-12T19:33:23,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-12T19:33:23,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:23,838 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:23,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:23,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440063923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:23,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:23,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440063925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:23,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:23,975 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411128cafaf22ebe2402a93c0f4e3d8267d13_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411128cafaf22ebe2402a93c0f4e3d8267d13_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:23,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/f7fbdfc588e14c1fb194ef1aac72d389, store: [table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:23,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/f7fbdfc588e14c1fb194ef1aac72d389 is 175, key is test_row_0/A:col10/1731440001934/Put/seqid=0 2024-11-12T19:33:24,004 INFO [master/81d69e608036:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-12T19:33:24,004 INFO [master/81d69e608036:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-12T19:33:24,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741967_1143 (size=31105) 2024-11-12T19:33:24,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:24,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440064035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:24,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:24,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440064039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:24,138 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/ff63a99cb4d6472a9eaddb6b607b9a2e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/ff63a99cb4d6472a9eaddb6b607b9a2e 2024-11-12T19:33:24,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:24,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440064138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:24,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:24,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440064147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:24,191 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/C of 5e91f676554be63c9f656bc420de8a2a into ff63a99cb4d6472a9eaddb6b607b9a2e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:24,191 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:24,191 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/C, priority=13, startTime=1731440003412; duration=0sec 2024-11-12T19:33:24,191 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:24,191 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:C 2024-11-12T19:33:24,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:24,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440064246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:24,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:24,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440064249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:24,425 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=156, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/f7fbdfc588e14c1fb194ef1aac72d389 2024-11-12T19:33:24,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/fcca73aee9344ef98a6e9e6dee277943 is 50, key is test_row_0/B:col10/1731440001934/Put/seqid=0 2024-11-12T19:33:24,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741968_1144 (size=12151) 2024-11-12T19:33:24,496 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/fcca73aee9344ef98a6e9e6dee277943 2024-11-12T19:33:24,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/a40e4b364aeb4bbdaa464cf5578a9570 is 50, key is test_row_0/C:col10/1731440001934/Put/seqid=0 2024-11-12T19:33:24,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:24,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440064554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:24,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:24,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440064554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:24,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741969_1145 (size=12151) 2024-11-12T19:33:24,571 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/a40e4b364aeb4bbdaa464cf5578a9570 2024-11-12T19:33:24,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/f7fbdfc588e14c1fb194ef1aac72d389 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/f7fbdfc588e14c1fb194ef1aac72d389 2024-11-12T19:33:24,634 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/f7fbdfc588e14c1fb194ef1aac72d389, entries=150, sequenceid=156, filesize=30.4 K 2024-11-12T19:33:24,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/fcca73aee9344ef98a6e9e6dee277943 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/fcca73aee9344ef98a6e9e6dee277943 2024-11-12T19:33:24,663 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/fcca73aee9344ef98a6e9e6dee277943, entries=150, sequenceid=156, filesize=11.9 K 2024-11-12T19:33:24,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/a40e4b364aeb4bbdaa464cf5578a9570 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/a40e4b364aeb4bbdaa464cf5578a9570 2024-11-12T19:33:24,692 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/a40e4b364aeb4bbdaa464cf5578a9570, entries=150, sequenceid=156, filesize=11.9 K 2024-11-12T19:33:24,710 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 5e91f676554be63c9f656bc420de8a2a in 1267ms, sequenceid=156, compaction requested=false 2024-11-12T19:33:24,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:24,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:24,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-11-12T19:33:24,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-11-12T19:33:24,717 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-11-12T19:33:24,717 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0340 sec 2024-11-12T19:33:24,719 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 3.0420 sec 2024-11-12T19:33:25,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:25,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5e91f676554be63c9f656bc420de8a2a 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-12T19:33:25,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=A 2024-11-12T19:33:25,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:25,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=B 2024-11-12T19:33:25,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:25,164 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=C 2024-11-12T19:33:25,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:25,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112e9d2975cb97a470da8f49c4f367d54a8_5e91f676554be63c9f656bc420de8a2a is 50, key is test_row_0/A:col10/1731440005065/Put/seqid=0 2024-11-12T19:33:25,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741970_1146 (size=17284) 2024-11-12T19:33:25,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:25,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440065356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:25,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:25,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440065356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:25,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:25,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440065463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:25,470 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:25,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440065470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:25,643 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:25,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:25,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440065671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:25,675 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112e9d2975cb97a470da8f49c4f367d54a8_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112e9d2975cb97a470da8f49c4f367d54a8_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:25,683 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/baea52df4c884b0797d0777e325a8a21, store: [table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:25,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:25,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440065677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:25,684 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/baea52df4c884b0797d0777e325a8a21 is 175, key is test_row_0/A:col10/1731440005065/Put/seqid=0 2024-11-12T19:33:25,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741971_1147 (size=48389) 2024-11-12T19:33:25,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-12T19:33:25,802 INFO [Thread-538 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-11-12T19:33:25,808 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:33:25,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-12T19:33:25,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-12T19:33:25,814 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:33:25,816 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:33:25,816 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:33:25,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-12T19:33:25,974 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:25,979 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-12T19:33:25,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:25,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:25,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:25,979 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:25,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:25,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:25,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:25,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440065987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:25,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:25,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440065993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:26,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-12T19:33:26,125 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=176, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/baea52df4c884b0797d0777e325a8a21 2024-11-12T19:33:26,134 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:26,138 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-12T19:33:26,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:26,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:26,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:26,139 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:26,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:26,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:26,152 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/bf920771d57142fa88fcdcc32e089f1e is 50, key is test_row_0/B:col10/1731440005065/Put/seqid=0 2024-11-12T19:33:26,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:26,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440066165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:26,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:26,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440066169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:26,171 DEBUG [Thread-528 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4194 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., hostname=81d69e608036,33067,1731439956493, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:33:26,172 DEBUG [Thread-534 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4204 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, 
regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., hostname=81d69e608036,33067,1731439956493, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) 
at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:33:26,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741972_1148 (size=12151) 2024-11-12T19:33:26,300 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:26,300 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-12T19:33:26,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:26,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:26,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:26,301 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:26,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:26,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:26,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-12T19:33:26,458 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:26,460 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-12T19:33:26,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:26,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:26,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:26,461 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:26,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:26,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:26,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:26,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50590 deadline: 1731440066475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:26,479 DEBUG [Thread-536 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8211 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., hostname=81d69e608036,33067,1731439956493, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:33:26,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:26,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440066500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:26,507 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:26,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440066505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:26,580 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/bf920771d57142fa88fcdcc32e089f1e 2024-11-12T19:33:26,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/3601d138f0434e4695a6bcddf4f2419d is 50, key is test_row_0/C:col10/1731440005065/Put/seqid=0 2024-11-12T19:33:26,618 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:26,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741973_1149 (size=12151) 2024-11-12T19:33:26,622 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-12T19:33:26,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:26,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:26,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:26,622 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:26,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:26,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:26,638 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/3601d138f0434e4695a6bcddf4f2419d 2024-11-12T19:33:26,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/baea52df4c884b0797d0777e325a8a21 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/baea52df4c884b0797d0777e325a8a21 2024-11-12T19:33:26,671 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/baea52df4c884b0797d0777e325a8a21, entries=250, sequenceid=176, filesize=47.3 K 2024-11-12T19:33:26,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/bf920771d57142fa88fcdcc32e089f1e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/bf920771d57142fa88fcdcc32e089f1e 2024-11-12T19:33:26,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/bf920771d57142fa88fcdcc32e089f1e, entries=150, 
sequenceid=176, filesize=11.9 K 2024-11-12T19:33:26,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/3601d138f0434e4695a6bcddf4f2419d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/3601d138f0434e4695a6bcddf4f2419d 2024-11-12T19:33:26,714 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/3601d138f0434e4695a6bcddf4f2419d, entries=150, sequenceid=176, filesize=11.9 K 2024-11-12T19:33:26,723 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 5e91f676554be63c9f656bc420de8a2a in 1558ms, sequenceid=176, compaction requested=true 2024-11-12T19:33:26,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:26,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:33:26,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:26,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:33:26,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-12T19:33:26,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:33:26,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-12T19:33:26,724 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:26,724 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:26,729 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110907 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:26,729 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/A is initiating minor compaction (all files) 2024-11-12T19:33:26,730 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/A in 
TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:26,730 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/53d08e5ef0ad4278a67ea39ae037a01d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/f7fbdfc588e14c1fb194ef1aac72d389, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/baea52df4c884b0797d0777e325a8a21] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=108.3 K 2024-11-12T19:33:26,730 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:26,730 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/53d08e5ef0ad4278a67ea39ae037a01d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/f7fbdfc588e14c1fb194ef1aac72d389, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/baea52df4c884b0797d0777e325a8a21] 2024-11-12T19:33:26,733 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53d08e5ef0ad4278a67ea39ae037a01d, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1731439999660 2024-11-12T19:33:26,739 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:26,739 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/C is initiating minor compaction (all files) 2024-11-12T19:33:26,740 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7fbdfc588e14c1fb194ef1aac72d389, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731440001895 2024-11-12T19:33:26,743 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting baea52df4c884b0797d0777e325a8a21, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1731440003839 2024-11-12T19:33:26,739 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/C in 
TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:26,743 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/ff63a99cb4d6472a9eaddb6b607b9a2e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/a40e4b364aeb4bbdaa464cf5578a9570, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/3601d138f0434e4695a6bcddf4f2419d] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=35.9 K 2024-11-12T19:33:26,745 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting ff63a99cb4d6472a9eaddb6b607b9a2e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1731439999660 2024-11-12T19:33:26,749 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting a40e4b364aeb4bbdaa464cf5578a9570, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731440001895 2024-11-12T19:33:26,755 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 3601d138f0434e4695a6bcddf4f2419d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1731440003839 2024-11-12T19:33:26,780 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:26,783 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:26,787 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-12T19:33:26,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
2024-11-12T19:33:26,787 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 5e91f676554be63c9f656bc420de8a2a 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-12T19:33:26,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=A 2024-11-12T19:33:26,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:26,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=B 2024-11-12T19:33:26,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:26,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=C 2024-11-12T19:33:26,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:26,803 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411129b6c1bbd0fff4e07aa38b42c4de55e44_5e91f676554be63c9f656bc420de8a2a store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:26,806 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411129b6c1bbd0fff4e07aa38b42c4de55e44_5e91f676554be63c9f656bc420de8a2a, store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:26,806 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411129b6c1bbd0fff4e07aa38b42c4de55e44_5e91f676554be63c9f656bc420de8a2a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:26,839 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#C#compaction#121 average throughput is 0.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:26,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112ab9c4dec6b1846239757ecd61bc995e8_5e91f676554be63c9f656bc420de8a2a is 50, key is test_row_0/A:col10/1731440005264/Put/seqid=0 2024-11-12T19:33:26,847 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/24c5f315decd415e904ad78dae9b8a2d is 50, key is test_row_0/C:col10/1731440005065/Put/seqid=0 2024-11-12T19:33:26,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741974_1150 (size=4469) 2024-11-12T19:33:26,861 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#A#compaction#120 average throughput is 0.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:26,862 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/e82b88d0c89740de9445e9eecaf9d565 is 175, key is test_row_0/A:col10/1731440005065/Put/seqid=0 2024-11-12T19:33:26,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741975_1151 (size=12304) 2024-11-12T19:33:26,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-12T19:33:26,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741976_1152 (size=12561) 2024-11-12T19:33:26,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741977_1153 (size=31515) 2024-11-12T19:33:27,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:27,333 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112ab9c4dec6b1846239757ecd61bc995e8_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112ab9c4dec6b1846239757ecd61bc995e8_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:27,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/1d230b9ecafe4dd081d7fdea2f74a3d6, store: [table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:27,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/1d230b9ecafe4dd081d7fdea2f74a3d6 is 175, key is test_row_0/A:col10/1731440005264/Put/seqid=0 2024-11-12T19:33:27,351 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/24c5f315decd415e904ad78dae9b8a2d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/24c5f315decd415e904ad78dae9b8a2d 2024-11-12T19:33:27,362 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/e82b88d0c89740de9445e9eecaf9d565 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/e82b88d0c89740de9445e9eecaf9d565 2024-11-12T19:33:27,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741978_1154 (size=31105) 2024-11-12T19:33:27,369 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=195, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/1d230b9ecafe4dd081d7fdea2f74a3d6 2024-11-12T19:33:27,379 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/C of 5e91f676554be63c9f656bc420de8a2a into 24c5f315decd415e904ad78dae9b8a2d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:33:27,379 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:27,379 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/C, priority=13, startTime=1731440006723; duration=0sec 2024-11-12T19:33:27,379 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:27,379 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:C 2024-11-12T19:33:27,382 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:27,382 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/A of 5e91f676554be63c9f656bc420de8a2a into e82b88d0c89740de9445e9eecaf9d565(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:27,383 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:27,383 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/A, priority=13, startTime=1731440006723; duration=0sec 2024-11-12T19:33:27,383 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:27,383 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:A 2024-11-12T19:33:27,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/16bca27f108c4ad1ad00505bc61615af is 50, key is test_row_0/B:col10/1731440005264/Put/seqid=0 2024-11-12T19:33:27,391 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:27,391 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/B is initiating minor compaction (all files) 2024-11-12T19:33:27,391 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/B in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
2024-11-12T19:33:27,392 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/0e744cd9805448b599ff9d74d531524e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/fcca73aee9344ef98a6e9e6dee277943, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/bf920771d57142fa88fcdcc32e089f1e] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=35.9 K 2024-11-12T19:33:27,392 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e744cd9805448b599ff9d74d531524e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1731439999660 2024-11-12T19:33:27,393 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting fcca73aee9344ef98a6e9e6dee277943, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731440001895 2024-11-12T19:33:27,393 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting bf920771d57142fa88fcdcc32e089f1e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1731440003839 2024-11-12T19:33:27,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741979_1155 (size=12151) 2024-11-12T19:33:27,419 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/16bca27f108c4ad1ad00505bc61615af 2024-11-12T19:33:27,422 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#B#compaction#124 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:27,423 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/dc24a2695d5b4bee97702f7bd19ac542 is 50, key is test_row_0/B:col10/1731440005065/Put/seqid=0 2024-11-12T19:33:27,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741980_1156 (size=12561) 2024-11-12T19:33:27,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/9e361c813e434d32a4ced37cba9e5eab is 50, key is test_row_0/C:col10/1731440005264/Put/seqid=0 2024-11-12T19:33:27,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:27,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:27,527 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/dc24a2695d5b4bee97702f7bd19ac542 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/dc24a2695d5b4bee97702f7bd19ac542 2024-11-12T19:33:27,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741981_1157 (size=12151) 2024-11-12T19:33:27,537 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/9e361c813e434d32a4ced37cba9e5eab 2024-11-12T19:33:27,546 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/B of 5e91f676554be63c9f656bc420de8a2a into dc24a2695d5b4bee97702f7bd19ac542(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:33:27,546 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:27,546 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/B, priority=13, startTime=1731440006723; duration=0sec 2024-11-12T19:33:27,547 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:27,547 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:B 2024-11-12T19:33:27,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/1d230b9ecafe4dd081d7fdea2f74a3d6 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/1d230b9ecafe4dd081d7fdea2f74a3d6 2024-11-12T19:33:27,580 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/1d230b9ecafe4dd081d7fdea2f74a3d6, entries=150, sequenceid=195, filesize=30.4 K 2024-11-12T19:33:27,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/16bca27f108c4ad1ad00505bc61615af as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/16bca27f108c4ad1ad00505bc61615af 2024-11-12T19:33:27,591 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:27,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440067582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:27,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:27,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440067591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:27,605 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/16bca27f108c4ad1ad00505bc61615af, entries=150, sequenceid=195, filesize=11.9 K 2024-11-12T19:33:27,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/9e361c813e434d32a4ced37cba9e5eab as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/9e361c813e434d32a4ced37cba9e5eab 2024-11-12T19:33:27,632 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/9e361c813e434d32a4ced37cba9e5eab, entries=150, sequenceid=195, filesize=11.9 K 2024-11-12T19:33:27,637 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 5e91f676554be63c9f656bc420de8a2a in 849ms, sequenceid=195, compaction requested=false 2024-11-12T19:33:27,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:27,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
2024-11-12T19:33:27,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-12T19:33:27,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-12T19:33:27,641 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-12T19:33:27,641 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8220 sec 2024-11-12T19:33:27,644 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 1.8330 sec 2024-11-12T19:33:27,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:27,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5e91f676554be63c9f656bc420de8a2a 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-12T19:33:27,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=A 2024-11-12T19:33:27,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:27,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=B 2024-11-12T19:33:27,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:27,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=C 2024-11-12T19:33:27,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:27,749 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111255e42a8f068243bb8822b3049b2c4744_5e91f676554be63c9f656bc420de8a2a is 50, key is test_row_0/A:col10/1731440007695/Put/seqid=0 2024-11-12T19:33:27,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741982_1158 (size=14794) 2024-11-12T19:33:27,803 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:27,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:27,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440067801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:27,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:27,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440067802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:27,820 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111255e42a8f068243bb8822b3049b2c4744_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111255e42a8f068243bb8822b3049b2c4744_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:27,830 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/95d48d53aa7741c687ea266938dafc2b, store: [table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:27,831 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/95d48d53aa7741c687ea266938dafc2b is 175, key is test_row_0/A:col10/1731440007695/Put/seqid=0 2024-11-12T19:33:27,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741983_1159 (size=39749) 2024-11-12T19:33:27,871 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=216, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/95d48d53aa7741c687ea266938dafc2b 2024-11-12T19:33:27,889 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/d8db59c17f2340aa8c18e7f4cecdfb92 is 50, key is test_row_0/B:col10/1731440007695/Put/seqid=0 2024-11-12T19:33:27,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:27,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440067905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:27,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:27,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440067915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:27,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-12T19:33:27,927 INFO [Thread-538 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-12T19:33:27,931 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:33:27,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741984_1160 (size=12151) 2024-11-12T19:33:27,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-12T19:33:27,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-12T19:33:27,951 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:33:27,954 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:33:27,955 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:33:28,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-12T19:33:28,113 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:28,115 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-12T19:33:28,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation 
on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:28,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:28,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:28,115 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:28,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:28,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:28,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:28,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440068115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:28,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:28,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440068121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:28,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-12T19:33:28,279 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:28,283 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-12T19:33:28,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:28,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:28,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:28,283 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
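The RegionTooBusyException warnings above ("Over memstore limit=512.0 K", thrown from HRegion.checkResources) mean the region is rejecting writes until a flush frees memstore space. A minimal sketch of application-level backoff around a put follows; it assumes the caller wants to handle this case explicitly, even though the HBase client can also retry such failures itself depending on its retry configuration. Row, family, and qualifier are taken from the log; the value and retry limits are arbitrary examples.

// Illustrative sketch only: back off and retry when a write is rejected because
// the region's memstore is over its blocking limit.
import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {

  // Walk the cause chain: depending on client retry settings, the server's
  // RegionTooBusyException may surface directly or wrapped in another IOException.
  private static boolean causedByRegionTooBusy(Throwable t) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
      if (cur instanceof RegionTooBusyException) {
        return true;
      }
    }
    return false;
  }

  static void putWithBackoff(Table table) throws IOException, InterruptedException {
    Put put = new Put(Bytes.toBytes("test_row_0"))
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
    long sleepMs = 100;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (IOException e) {
        if (!causedByRegionTooBusy(e) || attempt >= 5) {
          throw e; // not memstore pressure, or out of attempts
        }
        Thread.sleep(sleepMs);                  // give the flush time to catch up
        sleepMs = Math.min(sleepMs * 2, 5_000); // exponential backoff, capped
      }
    }
  }
}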
2024-11-12T19:33:28,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:28,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:28,334 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/d8db59c17f2340aa8c18e7f4cecdfb92 2024-11-12T19:33:28,351 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/4b101ccb8ed44855b308c01fbc1949f8 is 50, key is test_row_0/C:col10/1731440007695/Put/seqid=0 2024-11-12T19:33:28,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741985_1161 (size=12151) 2024-11-12T19:33:28,377 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/4b101ccb8ed44855b308c01fbc1949f8 2024-11-12T19:33:28,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/95d48d53aa7741c687ea266938dafc2b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/95d48d53aa7741c687ea266938dafc2b 2024-11-12T19:33:28,394 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/95d48d53aa7741c687ea266938dafc2b, entries=200, sequenceid=216, filesize=38.8 K 2024-11-12T19:33:28,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/d8db59c17f2340aa8c18e7f4cecdfb92 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/d8db59c17f2340aa8c18e7f4cecdfb92 2024-11-12T19:33:28,404 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/d8db59c17f2340aa8c18e7f4cecdfb92, entries=150, sequenceid=216, filesize=11.9 K 2024-11-12T19:33:28,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/4b101ccb8ed44855b308c01fbc1949f8 as 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/4b101ccb8ed44855b308c01fbc1949f8 2024-11-12T19:33:28,418 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/4b101ccb8ed44855b308c01fbc1949f8, entries=150, sequenceid=216, filesize=11.9 K 2024-11-12T19:33:28,420 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 5e91f676554be63c9f656bc420de8a2a in 721ms, sequenceid=216, compaction requested=true 2024-11-12T19:33:28,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:28,423 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:28,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:33:28,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:28,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:33:28,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:28,423 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:28,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:33:28,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:28,426 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102369 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:28,426 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:28,426 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/A is initiating minor compaction (all files) 2024-11-12T19:33:28,426 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/B is initiating minor compaction (all files) 2024-11-12T19:33:28,426 INFO 
[RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/A in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:28,426 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/B in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:28,426 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/dc24a2695d5b4bee97702f7bd19ac542, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/16bca27f108c4ad1ad00505bc61615af, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/d8db59c17f2340aa8c18e7f4cecdfb92] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=36.0 K 2024-11-12T19:33:28,426 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/e82b88d0c89740de9445e9eecaf9d565, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/1d230b9ecafe4dd081d7fdea2f74a3d6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/95d48d53aa7741c687ea266938dafc2b] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=100.0 K 2024-11-12T19:33:28,426 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:28,426 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/e82b88d0c89740de9445e9eecaf9d565, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/1d230b9ecafe4dd081d7fdea2f74a3d6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/95d48d53aa7741c687ea266938dafc2b] 2024-11-12T19:33:28,427 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting dc24a2695d5b4bee97702f7bd19ac542, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1731440003839 2024-11-12T19:33:28,427 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting e82b88d0c89740de9445e9eecaf9d565, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1731440003839 2024-11-12T19:33:28,428 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 16bca27f108c4ad1ad00505bc61615af, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1731440005264 2024-11-12T19:33:28,429 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d230b9ecafe4dd081d7fdea2f74a3d6, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1731440005264 2024-11-12T19:33:28,429 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting d8db59c17f2340aa8c18e7f4cecdfb92, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1731440007572 2024-11-12T19:33:28,430 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95d48d53aa7741c687ea266938dafc2b, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1731440007565 2024-11-12T19:33:28,436 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:28,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:28,436 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-12T19:33:28,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:28,436 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 5e91f676554be63c9f656bc420de8a2a 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-12T19:33:28,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
as already flushing 2024-11-12T19:33:28,443 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:28,444 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#B#compaction#129 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:28,445 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/0e683270336a432980eda2bae5c15cc0 is 50, key is test_row_0/B:col10/1731440007695/Put/seqid=0 2024-11-12T19:33:28,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=A 2024-11-12T19:33:28,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:28,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=B 2024-11-12T19:33:28,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:28,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=C 2024-11-12T19:33:28,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:28,463 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024111265aed038eb3d4a4ab57e1e920a0bb721_5e91f676554be63c9f656bc420de8a2a store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:28,465 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024111265aed038eb3d4a4ab57e1e920a0bb721_5e91f676554be63c9f656bc420de8a2a, store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:28,465 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111265aed038eb3d4a4ab57e1e920a0bb721_5e91f676554be63c9f656bc420de8a2a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:28,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43501 is added to blk_1073741987_1163 (size=4469) 2024-11-12T19:33:28,473 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#A#compaction#130 average throughput is 0.81 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:28,474 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/3623590aa8484715919a28baa148d9e2 is 175, key is test_row_0/A:col10/1731440007695/Put/seqid=0 2024-11-12T19:33:28,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112b05a0be4822748a2819a56f555a32141_5e91f676554be63c9f656bc420de8a2a is 50, key is test_row_0/A:col10/1731440008427/Put/seqid=0 2024-11-12T19:33:28,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741986_1162 (size=12663) 2024-11-12T19:33:28,502 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/0e683270336a432980eda2bae5c15cc0 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/0e683270336a432980eda2bae5c15cc0 2024-11-12T19:33:28,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:28,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440068497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:28,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:28,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440068503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:28,512 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/B of 5e91f676554be63c9f656bc420de8a2a into 0e683270336a432980eda2bae5c15cc0(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
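The interplay seen above, writes blocking at a 512.0 K memstore limit while three-file minor compactions are selected per store, is governed by a handful of configuration knobs. The sketch below lists example values only: a 512 K blocking limit is consistent with a small flush size times the default block multiplier of 4, but the values actually used by this test run are not shown in the log.

// Illustrative sketch only: region write-pressure and compaction-selection knobs
// relevant to the behaviour in the surrounding records.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionTuningSketch {
  public static Configuration exampleConf() {
    Configuration conf = HBaseConfiguration.create();
    // Writes are rejected with RegionTooBusyException (HRegion.checkResources)
    // once the region memstore exceeds flush.size * block.multiplier.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // example: 128 K
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // example: blocks at 512 K
    // Minor compaction is considered once a store has this many eligible files
    // (the selections above ran with 3 store files each).
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // example ratio
    return conf;
  }
}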
2024-11-12T19:33:28,512 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:28,512 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/B, priority=13, startTime=1731440008423; duration=0sec 2024-11-12T19:33:28,512 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:28,512 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:B 2024-11-12T19:33:28,512 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:28,514 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:28,514 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/C is initiating minor compaction (all files) 2024-11-12T19:33:28,514 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/C in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:28,514 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/24c5f315decd415e904ad78dae9b8a2d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/9e361c813e434d32a4ced37cba9e5eab, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/4b101ccb8ed44855b308c01fbc1949f8] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=36.0 K 2024-11-12T19:33:28,515 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 24c5f315decd415e904ad78dae9b8a2d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1731440003839 2024-11-12T19:33:28,516 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e361c813e434d32a4ced37cba9e5eab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1731440005264 2024-11-12T19:33:28,516 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b101ccb8ed44855b308c01fbc1949f8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1731440007572 2024-11-12T19:33:28,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 
is added to blk_1073741988_1164 (size=31617) 2024-11-12T19:33:28,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741989_1165 (size=14794) 2024-11-12T19:33:28,544 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/3623590aa8484715919a28baa148d9e2 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/3623590aa8484715919a28baa148d9e2 2024-11-12T19:33:28,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:28,548 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#C#compaction#132 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:28,548 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/3d84ea62c1ae41bbb9063c2977671c3d is 50, key is test_row_0/C:col10/1731440007695/Put/seqid=0 2024-11-12T19:33:28,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-12T19:33:28,554 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112b05a0be4822748a2819a56f555a32141_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112b05a0be4822748a2819a56f555a32141_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:28,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/b1b18f36237847ac926fa9cd02e510ed, store: [table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:28,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/b1b18f36237847ac926fa9cd02e510ed is 175, key is test_row_0/A:col10/1731440008427/Put/seqid=0 2024-11-12T19:33:28,558 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 
5e91f676554be63c9f656bc420de8a2a/A of 5e91f676554be63c9f656bc420de8a2a into 3623590aa8484715919a28baa148d9e2(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:28,558 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:28,558 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/A, priority=13, startTime=1731440008423; duration=0sec 2024-11-12T19:33:28,559 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:28,559 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:A 2024-11-12T19:33:28,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741990_1166 (size=12663) 2024-11-12T19:33:28,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741991_1167 (size=39749) 2024-11-12T19:33:28,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:28,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440068605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:28,606 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=237, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/b1b18f36237847ac926fa9cd02e510ed 2024-11-12T19:33:28,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:28,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440068609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:28,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/4d794c1e1fb54a929ad8bb8bbbaf633e is 50, key is test_row_0/B:col10/1731440008427/Put/seqid=0 2024-11-12T19:33:28,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741992_1168 (size=12151) 2024-11-12T19:33:28,649 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/4d794c1e1fb54a929ad8bb8bbbaf633e 2024-11-12T19:33:28,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/2d6f034f0d1a421b83b2368d5047a4b8 is 50, key is test_row_0/C:col10/1731440008427/Put/seqid=0 2024-11-12T19:33:28,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741993_1169 (size=12151) 2024-11-12T19:33:28,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:28,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440068811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:28,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:28,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440068816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:29,033 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/3d84ea62c1ae41bbb9063c2977671c3d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/3d84ea62c1ae41bbb9063c2977671c3d 2024-11-12T19:33:29,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-12T19:33:29,075 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/C of 5e91f676554be63c9f656bc420de8a2a into 3d84ea62c1ae41bbb9063c2977671c3d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:29,075 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:29,075 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/C, priority=13, startTime=1731440008423; duration=0sec 2024-11-12T19:33:29,075 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:29,075 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:C 2024-11-12T19:33:29,115 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/2d6f034f0d1a421b83b2368d5047a4b8 2024-11-12T19:33:29,122 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:29,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440069121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:29,122 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:29,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440069121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:29,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/b1b18f36237847ac926fa9cd02e510ed as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/b1b18f36237847ac926fa9cd02e510ed 2024-11-12T19:33:29,138 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/b1b18f36237847ac926fa9cd02e510ed, entries=200, sequenceid=237, filesize=38.8 K 2024-11-12T19:33:29,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/4d794c1e1fb54a929ad8bb8bbbaf633e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/4d794c1e1fb54a929ad8bb8bbbaf633e 2024-11-12T19:33:29,150 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/4d794c1e1fb54a929ad8bb8bbbaf633e, entries=150, sequenceid=237, filesize=11.9 K 2024-11-12T19:33:29,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/2d6f034f0d1a421b83b2368d5047a4b8 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/2d6f034f0d1a421b83b2368d5047a4b8 2024-11-12T19:33:29,162 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/2d6f034f0d1a421b83b2368d5047a4b8, entries=150, sequenceid=237, filesize=11.9 K 2024-11-12T19:33:29,166 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 5e91f676554be63c9f656bc420de8a2a in 730ms, sequenceid=237, compaction requested=false 2024-11-12T19:33:29,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:29,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:29,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-12T19:33:29,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-12T19:33:29,177 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-12T19:33:29,178 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2200 sec 2024-11-12T19:33:29,180 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.2470 sec 2024-11-12T19:33:29,626 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5e91f676554be63c9f656bc420de8a2a 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-12T19:33:29,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=A 2024-11-12T19:33:29,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:29,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=B 2024-11-12T19:33:29,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:29,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=C 2024-11-12T19:33:29,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:29,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:29,645 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112efb00fd72dc243d3914b643ac3922937_5e91f676554be63c9f656bc420de8a2a is 50, key is test_row_0/A:col10/1731440009624/Put/seqid=0 
2024-11-12T19:33:29,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741994_1170 (size=14794) 2024-11-12T19:33:29,671 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:29,677 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112efb00fd72dc243d3914b643ac3922937_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112efb00fd72dc243d3914b643ac3922937_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:29,679 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/8596f2569a494c84977ce26ea0fe97be, store: [table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:29,680 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/8596f2569a494c84977ce26ea0fe97be is 175, key is test_row_0/A:col10/1731440009624/Put/seqid=0 2024-11-12T19:33:29,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:29,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440069684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:29,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:29,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440069691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:29,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741995_1171 (size=39749) 2024-11-12T19:33:29,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:29,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440069803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:29,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:29,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440069803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:30,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:30,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440070007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:30,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:30,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440070007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:30,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-12T19:33:30,054 INFO [Thread-538 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-12T19:33:30,056 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:33:30,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:30,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50532 deadline: 1731440070194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:30,201 DEBUG [Thread-528 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8225 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., hostname=81d69e608036,33067,1731439956493, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:33:30,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:30,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50552 deadline: 1731440070205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:30,212 DEBUG [Thread-534 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8245 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., hostname=81d69e608036,33067,1731439956493, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:33:30,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, 
id=53, table=TestAcidGuarantees 2024-11-12T19:33:30,256 INFO [AsyncFSWAL-0-hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData-prefix:81d69e608036,46265,1731439955074 {}] wal.AbstractFSWAL(1183): Slow sync cost: 199 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43501,DS-f1c7dfe2-aaf6-4cdb-b091-3e481db414d8,DISK]] 2024-11-12T19:33:30,256 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=256, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/8596f2569a494c84977ce26ea0fe97be 2024-11-12T19:33:30,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-12T19:33:30,267 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:33:30,271 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:33:30,272 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:33:30,295 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/2d064ffe9cc54d76bff3a8cc1594e06b is 50, key is test_row_0/B:col10/1731440009624/Put/seqid=0 2024-11-12T19:33:30,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:30,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440070319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:30,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:30,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440070319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:30,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741996_1172 (size=12151) 2024-11-12T19:33:30,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-12T19:33:30,439 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:30,439 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-12T19:33:30,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:30,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:30,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:30,440 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:30,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:30,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:30,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-12T19:33:30,598 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:30,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-12T19:33:30,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:30,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:30,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:30,600 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:30,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:30,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:30,735 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/2d064ffe9cc54d76bff3a8cc1594e06b 2024-11-12T19:33:30,755 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:30,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-12T19:33:30,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:30,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:30,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:30,756 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:30,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:30,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:30,769 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/f95f23a082224ec5ac7012392675339c is 50, key is test_row_0/C:col10/1731440009624/Put/seqid=0 2024-11-12T19:33:30,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:30,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440070821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:30,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741997_1173 (size=12151) 2024-11-12T19:33:30,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:30,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440070826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:30,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-12T19:33:30,910 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:30,910 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-12T19:33:30,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:30,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:30,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:30,911 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
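The RegionTooBusyException warnings above are the write path refusing new mutations because the region's memstore is over its blocking limit (512.0 K here) while the flush is still in progress. Below is a hedged sketch of a single put with manual backoff on that exception, meant only to illustrate the failure mode; in practice the HBase client retries these calls itself, and the row and column names are made up for the example.

    // Hedged sketch: one put with manual backoff on RegionTooBusyException.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetry {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              break;                   // write accepted
            } catch (RegionTooBusyException busy) {
              Thread.sleep(backoffMs); // memstore over its blocking limit; back off
              backoffMs *= 2;
            }
          }
        }
      }
    }
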
2024-11-12T19:33:30,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:30,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:31,066 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:31,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-12T19:33:31,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:31,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:31,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:31,071 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:31,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:31,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
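The 512.0 K blocking limit quoted in those warnings is far below the production default, which suggests the test deliberately shrinks the memstore settings; the exact values used by this run are an assumption here. The limit is normally the memstore flush size multiplied by the block multiplier, as the sketch below shows.

    // Hedged sketch: the configuration knobs that typically govern the
    // blocking limit seen above. Values here are assumptions chosen so that
    // the product matches the 512 K limit printed in the log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimits {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a memstore once it reaches this many bytes (default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block new writes once the memstore grows to multiplier * flush size.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("writes block above ~" + blockingLimit + " bytes"); // 524288 = 512 KiB
      }
    }
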
2024-11-12T19:33:31,229 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:31,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/f95f23a082224ec5ac7012392675339c 2024-11-12T19:33:31,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-12T19:33:31,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:31,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:31,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:31,231 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:31,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:31,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:31,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/8596f2569a494c84977ce26ea0fe97be as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/8596f2569a494c84977ce26ea0fe97be 2024-11-12T19:33:31,278 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/8596f2569a494c84977ce26ea0fe97be, entries=200, sequenceid=256, filesize=38.8 K 2024-11-12T19:33:31,284 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/2d064ffe9cc54d76bff3a8cc1594e06b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/2d064ffe9cc54d76bff3a8cc1594e06b 2024-11-12T19:33:31,305 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/2d064ffe9cc54d76bff3a8cc1594e06b, entries=150, sequenceid=256, filesize=11.9 K 2024-11-12T19:33:31,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/f95f23a082224ec5ac7012392675339c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/f95f23a082224ec5ac7012392675339c 2024-11-12T19:33:31,347 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/f95f23a082224ec5ac7012392675339c, entries=150, sequenceid=256, filesize=11.9 K 2024-11-12T19:33:31,357 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 5e91f676554be63c9f656bc420de8a2a in 1732ms, sequenceid=256, compaction requested=true 2024-11-12T19:33:31,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:31,358 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:31,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:33:31,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:31,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact 
mark for store 5e91f676554be63c9f656bc420de8a2a:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:33:31,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:31,359 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:31,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:33:31,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:31,359 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:31,359 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/A is initiating minor compaction (all files) 2024-11-12T19:33:31,359 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/A in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:31,360 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/3623590aa8484715919a28baa148d9e2, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/b1b18f36237847ac926fa9cd02e510ed, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/8596f2569a494c84977ce26ea0fe97be] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=108.5 K 2024-11-12T19:33:31,360 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:31,360 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/3623590aa8484715919a28baa148d9e2, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/b1b18f36237847ac926fa9cd02e510ed, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/8596f2569a494c84977ce26ea0fe97be] 2024-11-12T19:33:31,363 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:31,363 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3623590aa8484715919a28baa148d9e2, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1731440007572 2024-11-12T19:33:31,363 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/B is initiating minor compaction (all files) 2024-11-12T19:33:31,363 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/B in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:31,364 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/0e683270336a432980eda2bae5c15cc0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/4d794c1e1fb54a929ad8bb8bbbaf633e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/2d064ffe9cc54d76bff3a8cc1594e06b] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=36.1 K 2024-11-12T19:33:31,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-12T19:33:31,367 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1b18f36237847ac926fa9cd02e510ed, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731440007766 2024-11-12T19:33:31,367 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e683270336a432980eda2bae5c15cc0, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1731440007572 2024-11-12T19:33:31,369 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8596f2569a494c84977ce26ea0fe97be, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1731440008485 2024-11-12T19:33:31,371 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d794c1e1fb54a929ad8bb8bbbaf633e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, 
seqNum=237, earliestPutTs=1731440007786 2024-11-12T19:33:31,374 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d064ffe9cc54d76bff3a8cc1594e06b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1731440008485 2024-11-12T19:33:31,388 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:31,389 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-12T19:33:31,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:31,390 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 5e91f676554be63c9f656bc420de8a2a 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-12T19:33:31,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=A 2024-11-12T19:33:31,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:31,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=B 2024-11-12T19:33:31,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:31,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=C 2024-11-12T19:33:31,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:31,419 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:31,425 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#B#compaction#139 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:31,426 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/049ee12e96a14aa5bfa2e9b74ae54c33 is 50, key is test_row_0/B:col10/1731440009624/Put/seqid=0 2024-11-12T19:33:31,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112c47d0fe613cc4806852d6caffe8f1183_5e91f676554be63c9f656bc420de8a2a is 50, key is test_row_0/A:col10/1731440009665/Put/seqid=0 2024-11-12T19:33:31,440 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411124539bae71b6643f59e86d773c7be55a4_5e91f676554be63c9f656bc420de8a2a store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:31,442 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411124539bae71b6643f59e86d773c7be55a4_5e91f676554be63c9f656bc420de8a2a, store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:31,442 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411124539bae71b6643f59e86d773c7be55a4_5e91f676554be63c9f656bc420de8a2a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:31,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741998_1174 (size=12765) 2024-11-12T19:33:31,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741999_1175 (size=12454) 2024-11-12T19:33:31,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:31,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742000_1176 (size=4469) 2024-11-12T19:33:31,520 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112c47d0fe613cc4806852d6caffe8f1183_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112c47d0fe613cc4806852d6caffe8f1183_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:31,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 
{event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/fbc095d6c7ec4620a40249d5d216eed8, store: [table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:31,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/fbc095d6c7ec4620a40249d5d216eed8 is 175, key is test_row_0/A:col10/1731440009665/Put/seqid=0 2024-11-12T19:33:31,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742001_1177 (size=31255) 2024-11-12T19:33:31,567 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=276, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/fbc095d6c7ec4620a40249d5d216eed8 2024-11-12T19:33:31,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/7716864aae8543bc9485c4f719e4841f is 50, key is test_row_0/B:col10/1731440009665/Put/seqid=0 2024-11-12T19:33:31,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742002_1178 (size=12301) 2024-11-12T19:33:31,675 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/7716864aae8543bc9485c4f719e4841f 2024-11-12T19:33:31,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/55c85fb5caf14a989dfb14f1abe62d7b is 50, key is test_row_0/C:col10/1731440009665/Put/seqid=0 2024-11-12T19:33:31,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742003_1179 (size=12301) 2024-11-12T19:33:31,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:31,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
as already flushing 2024-11-12T19:33:31,899 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/049ee12e96a14aa5bfa2e9b74ae54c33 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/049ee12e96a14aa5bfa2e9b74ae54c33 2024-11-12T19:33:31,904 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#A#compaction#138 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:31,904 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/84c898bb4e584fd8b680bca719e5bdfd is 175, key is test_row_0/A:col10/1731440009624/Put/seqid=0 2024-11-12T19:33:31,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:31,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440071922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:31,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:31,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440071922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:31,931 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/B of 5e91f676554be63c9f656bc420de8a2a into 049ee12e96a14aa5bfa2e9b74ae54c33(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
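The compaction entries above show ExploringCompactionPolicy selecting all three B-family files ("3 files of size 36965 ... with 1 in ratio") and completing the minor compaction into a single 12.5 K file. The following is a simplified, hedged illustration of the "in ratio" test those lines refer to; it is not the actual HBase implementation, and the sizes are only roughly those from this run.

    // Hedged, simplified illustration of the "files in ratio" check used when
    // exploring candidate compaction sets: no file may be more than `ratio`
    // times larger than the sum of the other files in the set.
    import java.util.List;

    public class RatioCheck {
      static boolean filesInRatio(List<Long> sizes, double ratio) {
        if (sizes.size() < 2) {
          return true;
        }
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
          if (size > (total - size) * ratio) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Roughly the B-store file sizes from the log (12.4 K, 11.9 K, 11.9 K).
        System.out.println(filesInRatio(List.of(12700L, 12150L, 12115L), 1.2)); // true
      }
    }
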
2024-11-12T19:33:31,931 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:31,931 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/B, priority=13, startTime=1731440011359; duration=0sec 2024-11-12T19:33:31,931 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:31,931 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:B 2024-11-12T19:33:31,931 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:31,937 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:31,937 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/C is initiating minor compaction (all files) 2024-11-12T19:33:31,938 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/C in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:31,938 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/3d84ea62c1ae41bbb9063c2977671c3d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/2d6f034f0d1a421b83b2368d5047a4b8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/f95f23a082224ec5ac7012392675339c] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=36.1 K 2024-11-12T19:33:31,942 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d84ea62c1ae41bbb9063c2977671c3d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1731440007572 2024-11-12T19:33:31,948 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d6f034f0d1a421b83b2368d5047a4b8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731440007786 2024-11-12T19:33:31,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742004_1180 (size=31719) 2024-11-12T19:33:31,950 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting f95f23a082224ec5ac7012392675339c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=256, earliestPutTs=1731440008485 2024-11-12T19:33:31,965 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#C#compaction#143 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:31,965 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/88736840755147b290bde9e4d68b8258 is 50, key is test_row_0/C:col10/1731440009624/Put/seqid=0 2024-11-12T19:33:32,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742005_1181 (size=12765) 2024-11-12T19:33:32,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:32,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440072035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:32,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:32,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440072039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:32,062 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/88736840755147b290bde9e4d68b8258 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/88736840755147b290bde9e4d68b8258 2024-11-12T19:33:32,091 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/C of 5e91f676554be63c9f656bc420de8a2a into 88736840755147b290bde9e4d68b8258(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
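The repeated "Over memstore limit=512.0 K" rejections come from HRegion.checkResources(), which blocks updates while a region's memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The sketch below only illustrates how those two settings combine; the 128 KB flush size is an assumption chosen so that the product matches the 512 K limit in the log, not a value read from the test configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Blocking limit = flush size * block multiplier.
    // Assumed values: 128 KB * 4 = 512 KB, matching "Over memstore limit=512.0 K" above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;

    // Puts against a region are rejected with RegionTooBusyException while its
    // memstore stays above this limit and flushes have not yet caught up.
    System.out.println("Region memstore blocking limit: " + blockingLimit + " bytes");
  }
}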
2024-11-12T19:33:32,091 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:32,091 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/C, priority=13, startTime=1731440011359; duration=0sec 2024-11-12T19:33:32,091 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:32,091 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:C 2024-11-12T19:33:32,133 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/55c85fb5caf14a989dfb14f1abe62d7b 2024-11-12T19:33:32,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/fbc095d6c7ec4620a40249d5d216eed8 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/fbc095d6c7ec4620a40249d5d216eed8 2024-11-12T19:33:32,157 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/fbc095d6c7ec4620a40249d5d216eed8, entries=150, sequenceid=276, filesize=30.5 K 2024-11-12T19:33:32,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/7716864aae8543bc9485c4f719e4841f as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/7716864aae8543bc9485c4f719e4841f 2024-11-12T19:33:32,177 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/7716864aae8543bc9485c4f719e4841f, entries=150, sequenceid=276, filesize=12.0 K 2024-11-12T19:33:32,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/55c85fb5caf14a989dfb14f1abe62d7b as 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/55c85fb5caf14a989dfb14f1abe62d7b 2024-11-12T19:33:32,194 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/55c85fb5caf14a989dfb14f1abe62d7b, entries=150, sequenceid=276, filesize=12.0 K 2024-11-12T19:33:32,195 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 5e91f676554be63c9f656bc420de8a2a in 806ms, sequenceid=276, compaction requested=false 2024-11-12T19:33:32,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:32,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:32,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-12T19:33:32,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-12T19:33:32,200 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-12T19:33:32,201 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9260 sec 2024-11-12T19:33:32,202 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 2.1450 sec 2024-11-12T19:33:32,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:32,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5e91f676554be63c9f656bc420de8a2a 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-12T19:33:32,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=A 2024-11-12T19:33:32,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:32,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=B 2024-11-12T19:33:32,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:32,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=C 2024-11-12T19:33:32,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:32,254 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411127ba0f788d1aa460690afdfa4aa3c485b_5e91f676554be63c9f656bc420de8a2a is 50, key is test_row_0/A:col10/1731440012242/Put/seqid=0 2024-11-12T19:33:32,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742006_1182 (size=12454) 2024-11-12T19:33:32,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:32,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440072291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:32,300 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:32,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440072296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:32,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-12T19:33:32,366 INFO [Thread-538 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-12T19:33:32,367 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/84c898bb4e584fd8b680bca719e5bdfd as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/84c898bb4e584fd8b680bca719e5bdfd 2024-11-12T19:33:32,367 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:33:32,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-12T19:33:32,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-12T19:33:32,371 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:33:32,372 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:33:32,373 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:33:32,389 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/A of 5e91f676554be63c9f656bc420de8a2a into 84c898bb4e584fd8b680bca719e5bdfd(size=31.0 K), total size for store is 61.5 K. This selection was in queue for 0sec, and took 1sec to execute. 
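The master-side entries above show a client-requested flush of TestAcidGuarantees being turned into a FlushTableProcedure (pid=55) with a FlushRegionProcedure subprocedure for the region. A minimal sketch of issuing the same kind of flush through the Admin API follows; the connection setup is boilerplate and the table name is taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; in the log above the
      // master runs this as a FlushTableProcedure with per-region subprocedures.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}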
2024-11-12T19:33:32,389 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:32,389 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/A, priority=13, startTime=1731440011358; duration=1sec 2024-11-12T19:33:32,389 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:32,389 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:A 2024-11-12T19:33:32,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:32,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440072403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:32,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:32,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440072411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:32,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-12T19:33:32,531 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:32,532 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-12T19:33:32,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:32,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:32,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:32,532 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:32,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:32,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:32,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:32,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440072606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:32,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:32,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440072617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:32,667 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:32,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-12T19:33:32,674 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411127ba0f788d1aa460690afdfa4aa3c485b_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411127ba0f788d1aa460690afdfa4aa3c485b_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:32,676 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/3b0b2e177a9e44369a0be9e2d795fbf7, store: [table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:32,676 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/3b0b2e177a9e44369a0be9e2d795fbf7 is 175, key is test_row_0/A:col10/1731440012242/Put/seqid=0 2024-11-12T19:33:32,693 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:32,695 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-12T19:33:32,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:32,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
as already flushing 2024-11-12T19:33:32,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:32,695 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:32,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:32,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:32,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742007_1183 (size=31255) 2024-11-12T19:33:32,701 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=296, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/3b0b2e177a9e44369a0be9e2d795fbf7 2024-11-12T19:33:32,718 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/71017523503c4923a5261f2b62ea0297 is 50, key is test_row_0/B:col10/1731440012242/Put/seqid=0 2024-11-12T19:33:32,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742008_1184 (size=12301) 2024-11-12T19:33:32,735 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/71017523503c4923a5261f2b62ea0297 2024-11-12T19:33:32,747 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/b5e1bbd97b6d47dc9ff0a4b4cf6c07b0 is 50, key is test_row_0/C:col10/1731440012242/Put/seqid=0 2024-11-12T19:33:32,751 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742009_1185 (size=12301) 2024-11-12T19:33:32,753 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/b5e1bbd97b6d47dc9ff0a4b4cf6c07b0 2024-11-12T19:33:32,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/3b0b2e177a9e44369a0be9e2d795fbf7 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/3b0b2e177a9e44369a0be9e2d795fbf7 2024-11-12T19:33:32,786 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/3b0b2e177a9e44369a0be9e2d795fbf7, entries=150, sequenceid=296, filesize=30.5 K 2024-11-12T19:33:32,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/71017523503c4923a5261f2b62ea0297 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/71017523503c4923a5261f2b62ea0297 2024-11-12T19:33:32,792 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/71017523503c4923a5261f2b62ea0297, entries=150, sequenceid=296, filesize=12.0 K 2024-11-12T19:33:32,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/b5e1bbd97b6d47dc9ff0a4b4cf6c07b0 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/b5e1bbd97b6d47dc9ff0a4b4cf6c07b0 2024-11-12T19:33:32,800 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/b5e1bbd97b6d47dc9ff0a4b4cf6c07b0, entries=150, sequenceid=296, filesize=12.0 K 2024-11-12T19:33:32,801 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 5e91f676554be63c9f656bc420de8a2a in 558ms, sequenceid=296, compaction requested=true 2024-11-12T19:33:32,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:32,802 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:32,803 DEBUG [MemStoreFlusher.0 
{}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:33:32,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:32,803 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:32,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:33:32,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:32,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:33:32,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:32,805 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:32,805 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94229 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:32,806 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/A is initiating minor compaction (all files) 2024-11-12T19:33:32,806 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/B is initiating minor compaction (all files) 2024-11-12T19:33:32,806 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/B in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:32,806 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/A in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
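The selection entries above ("Selecting compaction from 3 store files ...", "Exploring compaction algorithm has selected 3 files ... with 1 in ratio") come from ExploringCompactionPolicy, which only accepts a candidate set when every file fits within the configured ratio of the rest of the set. The standalone helper below reproduces just that ratio test as a sketch; the 1.2 value is the usual default of hbase.hstore.compaction.ratio and is assumed here, not read from the test, and the file sizes are rough stand-ins for the ~37 KB selection in the log.

import java.util.List;

public class CompactionRatioCheckExample {

  // Ratio test in the spirit of ExploringCompactionPolicy: every file in the
  // candidate set must be no larger than ratio * (sum of the other files).
  // Sets that fail are rejected and other permutations are considered.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = 0L;
    for (long size : fileSizes) {
      total += size;
    }
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate sizes of a 3-file candidate like the one selected above.
    List<Long> candidate = List.of(12_800L, 12_300L, 12_300L);
    double ratio = 1.2; // assumed default of hbase.hstore.compaction.ratio
    System.out.println("Candidate accepted: " + filesInRatio(candidate, ratio));
  }
}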
2024-11-12T19:33:32,806 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/84c898bb4e584fd8b680bca719e5bdfd, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/fbc095d6c7ec4620a40249d5d216eed8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/3b0b2e177a9e44369a0be9e2d795fbf7] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=92.0 K 2024-11-12T19:33:32,806 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/049ee12e96a14aa5bfa2e9b74ae54c33, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/7716864aae8543bc9485c4f719e4841f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/71017523503c4923a5261f2b62ea0297] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=36.5 K 2024-11-12T19:33:32,806 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:32,806 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/84c898bb4e584fd8b680bca719e5bdfd, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/fbc095d6c7ec4620a40249d5d216eed8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/3b0b2e177a9e44369a0be9e2d795fbf7] 2024-11-12T19:33:32,806 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84c898bb4e584fd8b680bca719e5bdfd, keycount=150, bloomtype=ROW, size=31.0 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1731440008485 2024-11-12T19:33:32,806 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 049ee12e96a14aa5bfa2e9b74ae54c33, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1731440008485 2024-11-12T19:33:32,807 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 7716864aae8543bc9485c4f719e4841f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1731440009662 2024-11-12T19:33:32,807 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting fbc095d6c7ec4620a40249d5d216eed8, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1731440009662 2024-11-12T19:33:32,807 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b0b2e177a9e44369a0be9e2d795fbf7, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1731440011885 2024-11-12T19:33:32,807 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 71017523503c4923a5261f2b62ea0297, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1731440011885 2024-11-12T19:33:32,824 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:32,829 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#B#compaction#148 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:32,830 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/932830910c844eeebaee884ca4422e4d is 50, key is test_row_0/B:col10/1731440012242/Put/seqid=0 2024-11-12T19:33:32,832 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024111204ab7c63680547dfbfe7bf393fde7f0c_5e91f676554be63c9f656bc420de8a2a store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:32,834 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024111204ab7c63680547dfbfe7bf393fde7f0c_5e91f676554be63c9f656bc420de8a2a, store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:32,834 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111204ab7c63680547dfbfe7bf393fde7f0c_5e91f676554be63c9f656bc420de8a2a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:32,850 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:32,851 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-12T19:33:32,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
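The mobdir paths and the DefaultMobStoreFlusher / DefaultMobStoreCompactor entries above indicate that family A of the test table is MOB-enabled, so larger cells are written to separate MOB files under mobdir and compacted by the MOB-aware compactor. The sketch below shows how such a family can be declared; the 100-byte threshold is an illustrative assumption, not the value used by the test, and the resulting descriptor would normally be passed to Admin.createTable.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static void main(String[] args) {
    // Family "A" with MOB storage enabled: cells above the threshold go to
    // MOB files under the mobdir tree instead of the ordinary store files.
    ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)
        .setMobThreshold(100L)   // illustrative threshold in bytes
        .build();

    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(mobFamily)
        .build();

    System.out.println(table);
  }
}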
2024-11-12T19:33:32,851 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 5e91f676554be63c9f656bc420de8a2a 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-12T19:33:32,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=A 2024-11-12T19:33:32,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:32,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=B 2024-11-12T19:33:32,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:32,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=C 2024-11-12T19:33:32,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:32,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742010_1186 (size=13017) 2024-11-12T19:33:32,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742011_1187 (size=4469) 2024-11-12T19:33:32,871 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#A#compaction#147 average throughput is 0.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:32,871 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/430eb253525945598b584b4b26a62bb6 is 175, key is test_row_0/A:col10/1731440012242/Put/seqid=0 2024-11-12T19:33:32,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111269707720ed0a4770ba1eb4d6e8430ac4_5e91f676554be63c9f656bc420de8a2a is 50, key is test_row_0/A:col10/1731440012290/Put/seqid=0 2024-11-12T19:33:32,916 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
as already flushing 2024-11-12T19:33:32,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:32,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742012_1188 (size=31971) 2024-11-12T19:33:32,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742013_1189 (size=12454) 2024-11-12T19:33:32,971 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:32,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440072970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:32,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-12T19:33:32,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:32,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440072971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:33,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:33,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440073072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:33,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:33,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440073074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:33,274 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/932830910c844eeebaee884ca4422e4d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/932830910c844eeebaee884ca4422e4d 2024-11-12T19:33:33,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:33,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440073277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:33,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:33,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440073280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:33,289 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/B of 5e91f676554be63c9f656bc420de8a2a into 932830910c844eeebaee884ca4422e4d(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
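[Editor's note] Above, the long-compactions thread completes the minor compaction of 5e91f676554be63c9f656bc420de8a2a/B into a single 12.7 K store file and immediately selects the C family for the next round. As a hedged aside, the compaction state of a table can be inspected, or a major compaction requested, from the client side roughly as sketched below; only the table name comes from the log, everything else is an illustrative assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionStateSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Ask the region servers to major-compact every store of the table.
            // The request is asynchronous; the servers queue it on compaction
            // runners like the short/long compaction threads seen in this log.
            admin.majorCompact(table);

            // Poll until no store of the table reports an ongoing compaction.
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(1000);
            }
        }
    }
}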
2024-11-12T19:33:33,290 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:33,290 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/B, priority=13, startTime=1731440012803; duration=0sec 2024-11-12T19:33:33,290 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:33,290 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:B 2024-11-12T19:33:33,290 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:33,292 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:33,292 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/C is initiating minor compaction (all files) 2024-11-12T19:33:33,292 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/C in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:33,292 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/88736840755147b290bde9e4d68b8258, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/55c85fb5caf14a989dfb14f1abe62d7b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/b5e1bbd97b6d47dc9ff0a4b4cf6c07b0] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=36.5 K 2024-11-12T19:33:33,293 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 88736840755147b290bde9e4d68b8258, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1731440008485 2024-11-12T19:33:33,293 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 55c85fb5caf14a989dfb14f1abe62d7b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1731440009662 2024-11-12T19:33:33,293 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting b5e1bbd97b6d47dc9ff0a4b4cf6c07b0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1731440011885 2024-11-12T19:33:33,306 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
5e91f676554be63c9f656bc420de8a2a#C#compaction#150 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:33,307 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/140a9fe6e3b5432e926c60617533ae32 is 50, key is test_row_0/C:col10/1731440012242/Put/seqid=0 2024-11-12T19:33:33,324 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/430eb253525945598b584b4b26a62bb6 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/430eb253525945598b584b4b26a62bb6 2024-11-12T19:33:33,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:33,332 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/A of 5e91f676554be63c9f656bc420de8a2a into 430eb253525945598b584b4b26a62bb6(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:33,332 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111269707720ed0a4770ba1eb4d6e8430ac4_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111269707720ed0a4770ba1eb4d6e8430ac4_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:33,332 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:33,332 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/A, priority=13, startTime=1731440012802; duration=0sec 2024-11-12T19:33:33,332 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:33,332 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:A 2024-11-12T19:33:33,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/8be38787d12442bea5c31c773624dfaf, store: [table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:33,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/8be38787d12442bea5c31c773624dfaf is 175, key is test_row_0/A:col10/1731440012290/Put/seqid=0 2024-11-12T19:33:33,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742014_1190 (size=13017) 2024-11-12T19:33:33,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742015_1191 (size=31255) 2024-11-12T19:33:33,356 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/140a9fe6e3b5432e926c60617533ae32 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/140a9fe6e3b5432e926c60617533ae32 2024-11-12T19:33:33,362 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/C of 5e91f676554be63c9f656bc420de8a2a into 140a9fe6e3b5432e926c60617533ae32(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:33,362 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:33,362 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/C, priority=13, startTime=1731440012803; duration=0sec 2024-11-12T19:33:33,362 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:33,362 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:C 2024-11-12T19:33:33,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-12T19:33:33,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:33,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440073582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:33,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:33,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440073586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:33,755 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=315, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/8be38787d12442bea5c31c773624dfaf 2024-11-12T19:33:33,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/92f8e3e4f42c499fb3a448b83f774c4f is 50, key is test_row_0/B:col10/1731440012290/Put/seqid=0 2024-11-12T19:33:33,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742016_1192 (size=12301) 2024-11-12T19:33:33,777 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/92f8e3e4f42c499fb3a448b83f774c4f 2024-11-12T19:33:33,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/14812155b45249d3aa578f1cd8cffd42 is 50, key is test_row_0/C:col10/1731440012290/Put/seqid=0 2024-11-12T19:33:33,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742017_1193 (size=12301) 2024-11-12T19:33:34,003 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-12T19:33:34,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:34,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440074086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:34,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:34,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440074089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:34,189 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/14812155b45249d3aa578f1cd8cffd42 2024-11-12T19:33:34,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/8be38787d12442bea5c31c773624dfaf as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/8be38787d12442bea5c31c773624dfaf 2024-11-12T19:33:34,206 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/8be38787d12442bea5c31c773624dfaf, entries=150, sequenceid=315, filesize=30.5 K 2024-11-12T19:33:34,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/92f8e3e4f42c499fb3a448b83f774c4f as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/92f8e3e4f42c499fb3a448b83f774c4f 2024-11-12T19:33:34,221 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/92f8e3e4f42c499fb3a448b83f774c4f, entries=150, sequenceid=315, filesize=12.0 K 2024-11-12T19:33:34,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/14812155b45249d3aa578f1cd8cffd42 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/14812155b45249d3aa578f1cd8cffd42 2024-11-12T19:33:34,233 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/14812155b45249d3aa578f1cd8cffd42, entries=150, sequenceid=315, filesize=12.0 K 2024-11-12T19:33:34,250 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 5e91f676554be63c9f656bc420de8a2a in 1399ms, sequenceid=315, compaction requested=false 2024-11-12T19:33:34,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:34,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:34,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-12T19:33:34,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-12T19:33:34,263 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-12T19:33:34,263 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8830 sec 2024-11-12T19:33:34,265 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.8970 sec 2024-11-12T19:33:34,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-12T19:33:34,475 INFO [Thread-538 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-12T19:33:34,476 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:33:34,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-12T19:33:34,484 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:33:34,485 INFO 
[PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:33:34,485 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:33:34,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-12T19:33:34,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-12T19:33:34,638 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:34,638 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-12T19:33:34,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:34,638 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 5e91f676554be63c9f656bc420de8a2a 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-12T19:33:34,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=A 2024-11-12T19:33:34,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:34,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=B 2024-11-12T19:33:34,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:34,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=C 2024-11-12T19:33:34,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:34,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112b0d6fde1f43245da8806785e93b25aa6_5e91f676554be63c9f656bc420de8a2a is 50, key is test_row_0/A:col10/1731440012970/Put/seqid=0 2024-11-12T19:33:34,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742018_1194 
(size=12454) 2024-11-12T19:33:34,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-12T19:33:35,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:35,091 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112b0d6fde1f43245da8806785e93b25aa6_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112b0d6fde1f43245da8806785e93b25aa6_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:35,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/95070791cc3e40e7a1b8c1ee4dafb92d, store: [table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:35,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-12T19:33:35,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/95070791cc3e40e7a1b8c1ee4dafb92d is 175, key is test_row_0/A:col10/1731440012970/Put/seqid=0 2024-11-12T19:33:35,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. as already flushing 2024-11-12T19:33:35,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:35,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742019_1195 (size=31255) 2024-11-12T19:33:35,125 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=336, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/95070791cc3e40e7a1b8c1ee4dafb92d 2024-11-12T19:33:35,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:35,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440075132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:35,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/36a1559c126d45fbb1634214d5583837 is 50, key is test_row_0/B:col10/1731440012970/Put/seqid=0 2024-11-12T19:33:35,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:35,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440075135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:35,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742020_1196 (size=12301) 2024-11-12T19:33:35,185 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/36a1559c126d45fbb1634214d5583837 2024-11-12T19:33:35,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/69c4edd6449a4ce085eb3d7b6db51709 is 50, key is test_row_0/C:col10/1731440012970/Put/seqid=0 2024-11-12T19:33:35,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742021_1197 (size=12301) 2024-11-12T19:33:35,231 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/69c4edd6449a4ce085eb3d7b6db51709 2024-11-12T19:33:35,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/95070791cc3e40e7a1b8c1ee4dafb92d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/95070791cc3e40e7a1b8c1ee4dafb92d 2024-11-12T19:33:35,238 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:35,242 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/95070791cc3e40e7a1b8c1ee4dafb92d, entries=150, sequenceid=336, filesize=30.5 K 2024-11-12T19:33:35,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/36a1559c126d45fbb1634214d5583837 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/36a1559c126d45fbb1634214d5583837 2024-11-12T19:33:35,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50564 deadline: 1731440075237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:35,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:35,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50498 deadline: 1731440075239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:35,250 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/36a1559c126d45fbb1634214d5583837, entries=150, sequenceid=336, filesize=12.0 K 2024-11-12T19:33:35,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/69c4edd6449a4ce085eb3d7b6db51709 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/69c4edd6449a4ce085eb3d7b6db51709 2024-11-12T19:33:35,266 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/69c4edd6449a4ce085eb3d7b6db51709, entries=150, sequenceid=336, filesize=12.0 K 2024-11-12T19:33:35,269 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 5e91f676554be63c9f656bc420de8a2a in 631ms, sequenceid=336, compaction requested=true 2024-11-12T19:33:35,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:35,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
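
The repeated RegionTooBusyException entries above show writers being rejected while the concurrent flush drains the region's memstore, which has hit its blocking limit of 512.0 K. As a hedged illustration (not part of the test log), that limit is the product of the memstore flush size and the block multiplier; the sketch below assumes a test-style flush size of 128 K, inferred only from the 512.0 K figure reported in the log, and computes the resulting blocking threshold with the standard Hadoop/HBase Configuration API.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore flush threshold in bytes. 128 K is an assumed
        // test-style value (inferred from the 512.0 K limit in the log);
        // production defaults are far larger.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // Writes are rejected (RegionTooBusyException, "Over memstore limit")
        // once the memstore reaches flush.size * block.multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // Prints 524288 bytes, i.e. the 512.0 K limit seen above.
        System.out.println("Blocking limit = " + blockingLimit + " bytes");
    }
}
```
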
2024-11-12T19:33:35,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-12T19:33:35,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-12T19:33:35,281 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-12T19:33:35,281 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 794 msec 2024-11-12T19:33:35,283 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 805 msec 2024-11-12T19:33:35,396 DEBUG [Thread-545 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x250a1de4 to 127.0.0.1:60358 2024-11-12T19:33:35,396 DEBUG [Thread-545 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:33:35,397 DEBUG [Thread-539 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x40832d66 to 127.0.0.1:60358 2024-11-12T19:33:35,398 DEBUG [Thread-539 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:33:35,402 DEBUG [Thread-543 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62c6fdab to 127.0.0.1:60358 2024-11-12T19:33:35,402 DEBUG [Thread-543 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:33:35,402 DEBUG [Thread-541 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x29dad7a8 to 127.0.0.1:60358 2024-11-12T19:33:35,402 DEBUG [Thread-541 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:33:35,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:35,464 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5e91f676554be63c9f656bc420de8a2a 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-12T19:33:35,464 DEBUG [Thread-532 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x57449e06 to 127.0.0.1:60358 2024-11-12T19:33:35,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=A 2024-11-12T19:33:35,464 DEBUG [Thread-532 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:33:35,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:35,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=B 2024-11-12T19:33:35,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:35,465 DEBUG [Thread-530 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x515fd839 to 127.0.0.1:60358 2024-11-12T19:33:35,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=C 2024-11-12T19:33:35,465 DEBUG [Thread-530 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:33:35,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:35,493 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112b8cb070cb7f74c24adc7b7005b8cd4a6_5e91f676554be63c9f656bc420de8a2a is 50, key is test_row_0/A:col10/1731440015131/Put/seqid=0 2024-11-12T19:33:35,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742022_1198 (size=12454) 2024-11-12T19:33:35,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-12T19:33:35,594 INFO [Thread-538 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-12T19:33:35,903 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:35,907 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112b8cb070cb7f74c24adc7b7005b8cd4a6_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112b8cb070cb7f74c24adc7b7005b8cd4a6_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:35,908 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/fe34da5b91344534a82e6f005c3d4a15, store: [table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:35,909 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/fe34da5b91344534a82e6f005c3d4a15 is 175, key is test_row_0/A:col10/1731440015131/Put/seqid=0 2024-11-12T19:33:35,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742023_1199 (size=31255) 2024-11-12T19:33:36,314 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=356, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/fe34da5b91344534a82e6f005c3d4a15 2024-11-12T19:33:36,325 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/a64764b7362f432693e3e375d525dc53 is 50, key is test_row_0/B:col10/1731440015131/Put/seqid=0 2024-11-12T19:33:36,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742024_1200 (size=12301) 2024-11-12T19:33:36,570 DEBUG [Thread-536 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 
0x74be9bc0 to 127.0.0.1:60358 2024-11-12T19:33:36,570 DEBUG [Thread-536 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:33:36,737 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/a64764b7362f432693e3e375d525dc53 2024-11-12T19:33:36,747 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/504e307221a9488dad073bb6614c0677 is 50, key is test_row_0/C:col10/1731440015131/Put/seqid=0 2024-11-12T19:33:36,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742025_1201 (size=12301) 2024-11-12T19:33:37,153 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/504e307221a9488dad073bb6614c0677 2024-11-12T19:33:37,160 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/fe34da5b91344534a82e6f005c3d4a15 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/fe34da5b91344534a82e6f005c3d4a15 2024-11-12T19:33:37,166 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/fe34da5b91344534a82e6f005c3d4a15, entries=150, sequenceid=356, filesize=30.5 K 2024-11-12T19:33:37,168 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/a64764b7362f432693e3e375d525dc53 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/a64764b7362f432693e3e375d525dc53 2024-11-12T19:33:37,174 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/a64764b7362f432693e3e375d525dc53, entries=150, sequenceid=356, filesize=12.0 K 2024-11-12T19:33:37,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/504e307221a9488dad073bb6614c0677 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/504e307221a9488dad073bb6614c0677 2024-11-12T19:33:37,181 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/504e307221a9488dad073bb6614c0677, entries=150, sequenceid=356, filesize=12.0 K 2024-11-12T19:33:37,182 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=6.71 KB/6870 for 5e91f676554be63c9f656bc420de8a2a in 1718ms, sequenceid=356, compaction requested=true 2024-11-12T19:33:37,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:37,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:33:37,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:37,183 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:33:37,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:33:37,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:37,183 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:33:37,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e91f676554be63c9f656bc420de8a2a:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:33:37,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:37,184 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 125736 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:33:37,184 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:33:37,185 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/B is initiating minor compaction (all files) 2024-11-12T19:33:37,185 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/A is initiating minor compaction (all files) 2024-11-12T19:33:37,185 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/B in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
2024-11-12T19:33:37,185 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/932830910c844eeebaee884ca4422e4d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/92f8e3e4f42c499fb3a448b83f774c4f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/36a1559c126d45fbb1634214d5583837, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/a64764b7362f432693e3e375d525dc53] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=48.8 K 2024-11-12T19:33:37,185 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 932830910c844eeebaee884ca4422e4d, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1731440011885 2024-11-12T19:33:37,186 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/A in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:37,186 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 92f8e3e4f42c499fb3a448b83f774c4f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1731440012288 2024-11-12T19:33:37,186 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/430eb253525945598b584b4b26a62bb6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/8be38787d12442bea5c31c773624dfaf, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/95070791cc3e40e7a1b8c1ee4dafb92d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/fe34da5b91344534a82e6f005c3d4a15] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=122.8 K 2024-11-12T19:33:37,186 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:37,186 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/430eb253525945598b584b4b26a62bb6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/8be38787d12442bea5c31c773624dfaf, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/95070791cc3e40e7a1b8c1ee4dafb92d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/fe34da5b91344534a82e6f005c3d4a15] 2024-11-12T19:33:37,187 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 36a1559c126d45fbb1634214d5583837, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1731440012922 2024-11-12T19:33:37,187 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 430eb253525945598b584b4b26a62bb6, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1731440011885 2024-11-12T19:33:37,188 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting a64764b7362f432693e3e375d525dc53, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1731440015131 2024-11-12T19:33:37,188 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8be38787d12442bea5c31c773624dfaf, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1731440012288 2024-11-12T19:33:37,188 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95070791cc3e40e7a1b8c1ee4dafb92d, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1731440012922 2024-11-12T19:33:37,189 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe34da5b91344534a82e6f005c3d4a15, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1731440015131 2024-11-12T19:33:37,205 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#B#compaction#159 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:37,206 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/b26f1f9b382348c2a1d187ef136ddcd2 is 50, key is test_row_0/B:col10/1731440015131/Put/seqid=0 2024-11-12T19:33:37,211 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:37,219 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024111267fda23cde5649e0a195040a4f878752_5e91f676554be63c9f656bc420de8a2a store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:37,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742026_1202 (size=13153) 2024-11-12T19:33:37,231 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/b26f1f9b382348c2a1d187ef136ddcd2 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/b26f1f9b382348c2a1d187ef136ddcd2 2024-11-12T19:33:37,241 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/B of 5e91f676554be63c9f656bc420de8a2a into b26f1f9b382348c2a1d187ef136ddcd2(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:33:37,241 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:37,241 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/B, priority=12, startTime=1731440017183; duration=0sec 2024-11-12T19:33:37,241 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:37,241 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:B 2024-11-12T19:33:37,241 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:33:37,246 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:33:37,247 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 5e91f676554be63c9f656bc420de8a2a/C is initiating minor compaction (all files) 2024-11-12T19:33:37,247 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e91f676554be63c9f656bc420de8a2a/C in TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:37,247 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/140a9fe6e3b5432e926c60617533ae32, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/14812155b45249d3aa578f1cd8cffd42, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/69c4edd6449a4ce085eb3d7b6db51709, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/504e307221a9488dad073bb6614c0677] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp, totalSize=48.8 K 2024-11-12T19:33:37,248 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 140a9fe6e3b5432e926c60617533ae32, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1731440011885 2024-11-12T19:33:37,251 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 14812155b45249d3aa578f1cd8cffd42, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1731440012288 2024-11-12T19:33:37,253 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 69c4edd6449a4ce085eb3d7b6db51709, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=336, earliestPutTs=1731440012922 2024-11-12T19:33:37,255 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 504e307221a9488dad073bb6614c0677, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1731440015131 2024-11-12T19:33:37,268 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024111267fda23cde5649e0a195040a4f878752_5e91f676554be63c9f656bc420de8a2a, store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:37,268 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111267fda23cde5649e0a195040a4f878752_5e91f676554be63c9f656bc420de8a2a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:37,292 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#C#compaction#161 average throughput is 0.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:37,293 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/e7963d8b7e5b43ecb0351be32f2e8b84 is 50, key is test_row_0/C:col10/1731440015131/Put/seqid=0 2024-11-12T19:33:37,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742027_1203 (size=4469) 2024-11-12T19:33:37,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742028_1204 (size=13153) 2024-11-12T19:33:37,330 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/e7963d8b7e5b43ecb0351be32f2e8b84 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/e7963d8b7e5b43ecb0351be32f2e8b84 2024-11-12T19:33:37,343 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/C of 5e91f676554be63c9f656bc420de8a2a into e7963d8b7e5b43ecb0351be32f2e8b84(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:33:37,343 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:37,343 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/C, priority=12, startTime=1731440017183; duration=0sec 2024-11-12T19:33:37,343 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:37,343 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:C 2024-11-12T19:33:37,703 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e91f676554be63c9f656bc420de8a2a#A#compaction#160 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:37,704 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/e122407d2dde4e3d88a7c4e07bc67c76 is 175, key is test_row_0/A:col10/1731440015131/Put/seqid=0 2024-11-12T19:33:37,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742029_1205 (size=32107) 2024-11-12T19:33:38,115 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/e122407d2dde4e3d88a7c4e07bc67c76 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/e122407d2dde4e3d88a7c4e07bc67c76 2024-11-12T19:33:38,121 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5e91f676554be63c9f656bc420de8a2a/A of 5e91f676554be63c9f656bc420de8a2a into e122407d2dde4e3d88a7c4e07bc67c76(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:33:38,122 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:38,122 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a., storeName=5e91f676554be63c9f656bc420de8a2a/A, priority=12, startTime=1731440017182; duration=0sec 2024-11-12T19:33:38,122 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:38,122 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e91f676554be63c9f656bc420de8a2a:A 2024-11-12T19:33:40,216 DEBUG [Thread-534 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x35b51e5d to 127.0.0.1:60358 2024-11-12T19:33:40,216 DEBUG [Thread-534 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:33:40,276 DEBUG [Thread-528 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ebe01f4 to 127.0.0.1:60358 2024-11-12T19:33:40,276 DEBUG [Thread-528 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:33:40,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-12T19:33:40,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 42 2024-11-12T19:33:40,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 79 2024-11-12T19:33:40,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 89 2024-11-12T19:33:40,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 42 2024-11-12T19:33:40,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 28 2024-11-12T19:33:40,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-12T19:33:40,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2932 2024-11-12T19:33:40,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2880 2024-11-12T19:33:40,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-12T19:33:40,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1248 2024-11-12T19:33:40,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3740 rows 2024-11-12T19:33:40,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1215 2024-11-12T19:33:40,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3645 rows 2024-11-12T19:33:40,276 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-12T19:33:40,277 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1df308e2 to 127.0.0.1:60358 2024-11-12T19:33:40,277 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:33:40,279 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-12T19:33:40,279 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees 2024-11-12T19:33:40,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, 
state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-12T19:33:40,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-12T19:33:40,284 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731440020284"}]},"ts":"1731440020284"} 2024-11-12T19:33:40,286 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-12T19:33:40,294 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-12T19:33:40,295 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-12T19:33:40,297 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e91f676554be63c9f656bc420de8a2a, UNASSIGN}] 2024-11-12T19:33:40,298 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e91f676554be63c9f656bc420de8a2a, UNASSIGN 2024-11-12T19:33:40,299 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=5e91f676554be63c9f656bc420de8a2a, regionState=CLOSING, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:33:40,300 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-12T19:33:40,300 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; CloseRegionProcedure 5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493}] 2024-11-12T19:33:40,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-12T19:33:40,452 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:40,452 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(124): Close 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:40,453 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-12T19:33:40,453 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1681): Closing 5e91f676554be63c9f656bc420de8a2a, disabling compactions & flushes 2024-11-12T19:33:40,453 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 
2024-11-12T19:33:40,453 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:40,453 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. after waiting 0 ms 2024-11-12T19:33:40,453 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:40,453 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(2837): Flushing 5e91f676554be63c9f656bc420de8a2a 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-12T19:33:40,453 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=A 2024-11-12T19:33:40,453 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:40,453 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=B 2024-11-12T19:33:40,453 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:40,453 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e91f676554be63c9f656bc420de8a2a, store=C 2024-11-12T19:33:40,454 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:40,461 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111269ca30cfced249f88218a9c1b293340c_5e91f676554be63c9f656bc420de8a2a is 50, key is test_row_1/A:col10/1731440020274/Put/seqid=0 2024-11-12T19:33:40,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742030_1206 (size=9914) 2024-11-12T19:33:40,478 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:40,484 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111269ca30cfced249f88218a9c1b293340c_5e91f676554be63c9f656bc420de8a2a to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111269ca30cfced249f88218a9c1b293340c_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:40,485 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/22d5a7a79b7b4b4f97bca23887024570, store: [table=TestAcidGuarantees family=A region=5e91f676554be63c9f656bc420de8a2a] 2024-11-12T19:33:40,486 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/22d5a7a79b7b4b4f97bca23887024570 is 175, key is test_row_1/A:col10/1731440020274/Put/seqid=0 2024-11-12T19:33:40,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742031_1207 (size=22561) 2024-11-12T19:33:40,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-12T19:33:40,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-12T19:33:40,899 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=365, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/22d5a7a79b7b4b4f97bca23887024570 2024-11-12T19:33:40,908 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/b75ab9eefe214178a186df39deabc868 is 50, key is test_row_1/B:col10/1731440020274/Put/seqid=0 2024-11-12T19:33:40,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742032_1208 (size=9857) 2024-11-12T19:33:41,321 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/b75ab9eefe214178a186df39deabc868 2024-11-12T19:33:41,329 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/d13122837cda44d29ce04b257f1fb318 is 50, key is test_row_1/C:col10/1731440020274/Put/seqid=0 2024-11-12T19:33:41,332 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742033_1209 (size=9857) 2024-11-12T19:33:41,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-12T19:33:41,733 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/d13122837cda44d29ce04b257f1fb318 2024-11-12T19:33:41,740 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/A/22d5a7a79b7b4b4f97bca23887024570 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/22d5a7a79b7b4b4f97bca23887024570 2024-11-12T19:33:41,747 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/22d5a7a79b7b4b4f97bca23887024570, entries=100, sequenceid=365, filesize=22.0 K 2024-11-12T19:33:41,748 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/B/b75ab9eefe214178a186df39deabc868 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/b75ab9eefe214178a186df39deabc868 2024-11-12T19:33:41,755 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/b75ab9eefe214178a186df39deabc868, entries=100, sequenceid=365, filesize=9.6 K 2024-11-12T19:33:41,757 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/.tmp/C/d13122837cda44d29ce04b257f1fb318 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/d13122837cda44d29ce04b257f1fb318 2024-11-12T19:33:41,763 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/d13122837cda44d29ce04b257f1fb318, entries=100, sequenceid=365, filesize=9.6 K 2024-11-12T19:33:41,764 INFO 
[RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 5e91f676554be63c9f656bc420de8a2a in 1311ms, sequenceid=365, compaction requested=false 2024-11-12T19:33:41,764 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/cd3328313c2f4cdf8c257b2dd31e6c66, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/5b7d09d1d14046fcabbf71f49dba3142, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/2b76a4fc08894df7af9b6581fb403c1f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/62c706cf63fc4316aa7d255bac8ce29c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/105d535f518743ec9c1f82ba6a61780e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/0c9f05a1fb54489e999a668375f7cc59, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/9dcd38f2a2844538a3922838a626476b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/ef5393c18da74e35ab17abae0868eb9b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/5db47cc4c6b04b778402e343e652aced, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/53d08e5ef0ad4278a67ea39ae037a01d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/f7fbdfc588e14c1fb194ef1aac72d389, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/baea52df4c884b0797d0777e325a8a21, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/e82b88d0c89740de9445e9eecaf9d565, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/1d230b9ecafe4dd081d7fdea2f74a3d6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/95d48d53aa7741c687ea266938dafc2b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/3623590aa8484715919a28baa148d9e2, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/b1b18f36237847ac926fa9cd02e510ed, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/8596f2569a494c84977ce26ea0fe97be, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/84c898bb4e584fd8b680bca719e5bdfd, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/fbc095d6c7ec4620a40249d5d216eed8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/430eb253525945598b584b4b26a62bb6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/3b0b2e177a9e44369a0be9e2d795fbf7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/8be38787d12442bea5c31c773624dfaf, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/95070791cc3e40e7a1b8c1ee4dafb92d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/fe34da5b91344534a82e6f005c3d4a15] to archive 2024-11-12T19:33:41,766 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-12T19:33:41,769 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/cd3328313c2f4cdf8c257b2dd31e6c66 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/cd3328313c2f4cdf8c257b2dd31e6c66 2024-11-12T19:33:41,770 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/5b7d09d1d14046fcabbf71f49dba3142 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/5b7d09d1d14046fcabbf71f49dba3142 2024-11-12T19:33:41,772 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/2b76a4fc08894df7af9b6581fb403c1f to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/2b76a4fc08894df7af9b6581fb403c1f 2024-11-12T19:33:41,774 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/62c706cf63fc4316aa7d255bac8ce29c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/62c706cf63fc4316aa7d255bac8ce29c 2024-11-12T19:33:41,776 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/105d535f518743ec9c1f82ba6a61780e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/105d535f518743ec9c1f82ba6a61780e 2024-11-12T19:33:41,777 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/0c9f05a1fb54489e999a668375f7cc59 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/0c9f05a1fb54489e999a668375f7cc59 2024-11-12T19:33:41,779 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/9dcd38f2a2844538a3922838a626476b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/9dcd38f2a2844538a3922838a626476b 2024-11-12T19:33:41,781 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/ef5393c18da74e35ab17abae0868eb9b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/ef5393c18da74e35ab17abae0868eb9b 2024-11-12T19:33:41,784 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/5db47cc4c6b04b778402e343e652aced to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/5db47cc4c6b04b778402e343e652aced 2024-11-12T19:33:41,788 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/53d08e5ef0ad4278a67ea39ae037a01d to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/53d08e5ef0ad4278a67ea39ae037a01d 2024-11-12T19:33:41,794 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/f7fbdfc588e14c1fb194ef1aac72d389 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/f7fbdfc588e14c1fb194ef1aac72d389 2024-11-12T19:33:41,797 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/baea52df4c884b0797d0777e325a8a21 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/baea52df4c884b0797d0777e325a8a21 2024-11-12T19:33:41,799 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/e82b88d0c89740de9445e9eecaf9d565 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/e82b88d0c89740de9445e9eecaf9d565 2024-11-12T19:33:41,801 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/1d230b9ecafe4dd081d7fdea2f74a3d6 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/1d230b9ecafe4dd081d7fdea2f74a3d6 2024-11-12T19:33:41,803 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/95d48d53aa7741c687ea266938dafc2b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/95d48d53aa7741c687ea266938dafc2b 2024-11-12T19:33:41,809 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/3623590aa8484715919a28baa148d9e2 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/3623590aa8484715919a28baa148d9e2 2024-11-12T19:33:41,811 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/b1b18f36237847ac926fa9cd02e510ed to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/b1b18f36237847ac926fa9cd02e510ed 2024-11-12T19:33:41,817 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/8596f2569a494c84977ce26ea0fe97be to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/8596f2569a494c84977ce26ea0fe97be 2024-11-12T19:33:41,819 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/84c898bb4e584fd8b680bca719e5bdfd to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/84c898bb4e584fd8b680bca719e5bdfd 2024-11-12T19:33:41,821 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/fbc095d6c7ec4620a40249d5d216eed8 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/fbc095d6c7ec4620a40249d5d216eed8 2024-11-12T19:33:41,822 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/430eb253525945598b584b4b26a62bb6 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/430eb253525945598b584b4b26a62bb6 2024-11-12T19:33:41,825 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/3b0b2e177a9e44369a0be9e2d795fbf7 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/3b0b2e177a9e44369a0be9e2d795fbf7 2024-11-12T19:33:41,828 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/8be38787d12442bea5c31c773624dfaf to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/8be38787d12442bea5c31c773624dfaf 2024-11-12T19:33:41,830 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/95070791cc3e40e7a1b8c1ee4dafb92d to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/95070791cc3e40e7a1b8c1ee4dafb92d 2024-11-12T19:33:41,835 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/fe34da5b91344534a82e6f005c3d4a15 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/fe34da5b91344534a82e6f005c3d4a15 2024-11-12T19:33:41,843 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/e0af73c0ae884f2ba846b2904f7466d1, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/f3194d706a7b451e93586c6c5c2cf5ae, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/02fb4b8ce7e2465eb24bb3dd139b195b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/f26248b16c5a409f93f106d1e9d16768, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/231b1e419b4a41fe8a385ce77841069d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/5eb0d4d84ce940878f61118236ee29e3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/87d9f8fc0c9f425782a7ce65e565fe77, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/1756c16580594f9b940e3767d208d7df, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/0e744cd9805448b599ff9d74d531524e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/ec9f66177d2446098d038a28b1c33a58, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/fcca73aee9344ef98a6e9e6dee277943, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/dc24a2695d5b4bee97702f7bd19ac542, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/bf920771d57142fa88fcdcc32e089f1e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/16bca27f108c4ad1ad00505bc61615af, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/0e683270336a432980eda2bae5c15cc0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/d8db59c17f2340aa8c18e7f4cecdfb92, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/4d794c1e1fb54a929ad8bb8bbbaf633e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/049ee12e96a14aa5bfa2e9b74ae54c33, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/2d064ffe9cc54d76bff3a8cc1594e06b, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/7716864aae8543bc9485c4f719e4841f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/932830910c844eeebaee884ca4422e4d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/71017523503c4923a5261f2b62ea0297, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/92f8e3e4f42c499fb3a448b83f774c4f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/36a1559c126d45fbb1634214d5583837, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/a64764b7362f432693e3e375d525dc53] to archive 2024-11-12T19:33:41,845 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-12T19:33:41,855 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/e0af73c0ae884f2ba846b2904f7466d1 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/e0af73c0ae884f2ba846b2904f7466d1 2024-11-12T19:33:41,860 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/f3194d706a7b451e93586c6c5c2cf5ae to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/f3194d706a7b451e93586c6c5c2cf5ae 2024-11-12T19:33:41,871 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/02fb4b8ce7e2465eb24bb3dd139b195b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/02fb4b8ce7e2465eb24bb3dd139b195b 2024-11-12T19:33:41,876 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/f26248b16c5a409f93f106d1e9d16768 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/f26248b16c5a409f93f106d1e9d16768 2024-11-12T19:33:41,878 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/231b1e419b4a41fe8a385ce77841069d to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/231b1e419b4a41fe8a385ce77841069d 2024-11-12T19:33:41,879 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/5eb0d4d84ce940878f61118236ee29e3 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/5eb0d4d84ce940878f61118236ee29e3 2024-11-12T19:33:41,881 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/87d9f8fc0c9f425782a7ce65e565fe77 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/87d9f8fc0c9f425782a7ce65e565fe77 2024-11-12T19:33:41,882 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/1756c16580594f9b940e3767d208d7df to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/1756c16580594f9b940e3767d208d7df 2024-11-12T19:33:41,884 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/0e744cd9805448b599ff9d74d531524e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/0e744cd9805448b599ff9d74d531524e 2024-11-12T19:33:41,888 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/ec9f66177d2446098d038a28b1c33a58 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/ec9f66177d2446098d038a28b1c33a58 2024-11-12T19:33:41,891 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/fcca73aee9344ef98a6e9e6dee277943 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/fcca73aee9344ef98a6e9e6dee277943 2024-11-12T19:33:41,892 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/dc24a2695d5b4bee97702f7bd19ac542 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/dc24a2695d5b4bee97702f7bd19ac542 2024-11-12T19:33:41,895 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/bf920771d57142fa88fcdcc32e089f1e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/bf920771d57142fa88fcdcc32e089f1e 2024-11-12T19:33:41,896 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/16bca27f108c4ad1ad00505bc61615af to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/16bca27f108c4ad1ad00505bc61615af 2024-11-12T19:33:41,898 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/0e683270336a432980eda2bae5c15cc0 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/0e683270336a432980eda2bae5c15cc0 2024-11-12T19:33:41,899 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/d8db59c17f2340aa8c18e7f4cecdfb92 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/d8db59c17f2340aa8c18e7f4cecdfb92 2024-11-12T19:33:41,901 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/4d794c1e1fb54a929ad8bb8bbbaf633e to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/4d794c1e1fb54a929ad8bb8bbbaf633e 2024-11-12T19:33:41,904 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/049ee12e96a14aa5bfa2e9b74ae54c33 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/049ee12e96a14aa5bfa2e9b74ae54c33 2024-11-12T19:33:41,906 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/2d064ffe9cc54d76bff3a8cc1594e06b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/2d064ffe9cc54d76bff3a8cc1594e06b 2024-11-12T19:33:41,909 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/7716864aae8543bc9485c4f719e4841f to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/7716864aae8543bc9485c4f719e4841f 2024-11-12T19:33:41,914 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/932830910c844eeebaee884ca4422e4d to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/932830910c844eeebaee884ca4422e4d 2024-11-12T19:33:41,916 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/71017523503c4923a5261f2b62ea0297 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/71017523503c4923a5261f2b62ea0297 2024-11-12T19:33:41,918 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/92f8e3e4f42c499fb3a448b83f774c4f to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/92f8e3e4f42c499fb3a448b83f774c4f 2024-11-12T19:33:41,920 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/36a1559c126d45fbb1634214d5583837 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/36a1559c126d45fbb1634214d5583837 2024-11-12T19:33:41,921 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/a64764b7362f432693e3e375d525dc53 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/a64764b7362f432693e3e375d525dc53 2024-11-12T19:33:41,931 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/cb5baac74bb7420b8d028d1805820498, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/e720d9e706e7447d83d7609839ca4152, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/d3d9c4b141af4835a56a7113f0c3e551, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/4537c2580ee3433b9edb47c0c854a75f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/9e1540695c8a423984bfdee88715e3e3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/350157dc15404f6c986d8c091296a180, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/4115cce62ced4873ae7e8cf2de23598c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/8e2f633c54f140e1a0b0509544db6cda, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/ff63a99cb4d6472a9eaddb6b607b9a2e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/a12fe3fecf3b45159069e046f4114eb8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/a40e4b364aeb4bbdaa464cf5578a9570, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/24c5f315decd415e904ad78dae9b8a2d, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/3601d138f0434e4695a6bcddf4f2419d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/9e361c813e434d32a4ced37cba9e5eab, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/3d84ea62c1ae41bbb9063c2977671c3d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/4b101ccb8ed44855b308c01fbc1949f8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/2d6f034f0d1a421b83b2368d5047a4b8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/88736840755147b290bde9e4d68b8258, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/f95f23a082224ec5ac7012392675339c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/55c85fb5caf14a989dfb14f1abe62d7b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/140a9fe6e3b5432e926c60617533ae32, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/b5e1bbd97b6d47dc9ff0a4b4cf6c07b0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/14812155b45249d3aa578f1cd8cffd42, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/69c4edd6449a4ce085eb3d7b6db51709, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/504e307221a9488dad073bb6614c0677] to archive 2024-11-12T19:33:41,932 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-12T19:33:41,937 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/cb5baac74bb7420b8d028d1805820498 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/cb5baac74bb7420b8d028d1805820498 2024-11-12T19:33:41,940 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/e720d9e706e7447d83d7609839ca4152 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/e720d9e706e7447d83d7609839ca4152 2024-11-12T19:33:41,943 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/d3d9c4b141af4835a56a7113f0c3e551 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/d3d9c4b141af4835a56a7113f0c3e551 2024-11-12T19:33:41,946 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/4537c2580ee3433b9edb47c0c854a75f to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/4537c2580ee3433b9edb47c0c854a75f 2024-11-12T19:33:41,947 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/9e1540695c8a423984bfdee88715e3e3 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/9e1540695c8a423984bfdee88715e3e3 2024-11-12T19:33:41,953 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/350157dc15404f6c986d8c091296a180 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/350157dc15404f6c986d8c091296a180 2024-11-12T19:33:41,955 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/4115cce62ced4873ae7e8cf2de23598c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/4115cce62ced4873ae7e8cf2de23598c 2024-11-12T19:33:41,959 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/8e2f633c54f140e1a0b0509544db6cda to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/8e2f633c54f140e1a0b0509544db6cda 2024-11-12T19:33:41,961 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/ff63a99cb4d6472a9eaddb6b607b9a2e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/ff63a99cb4d6472a9eaddb6b607b9a2e 2024-11-12T19:33:41,963 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/a12fe3fecf3b45159069e046f4114eb8 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/a12fe3fecf3b45159069e046f4114eb8 2024-11-12T19:33:41,965 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/a40e4b364aeb4bbdaa464cf5578a9570 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/a40e4b364aeb4bbdaa464cf5578a9570 2024-11-12T19:33:41,967 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/24c5f315decd415e904ad78dae9b8a2d to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/24c5f315decd415e904ad78dae9b8a2d 2024-11-12T19:33:41,969 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/3601d138f0434e4695a6bcddf4f2419d to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/3601d138f0434e4695a6bcddf4f2419d 2024-11-12T19:33:41,970 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/9e361c813e434d32a4ced37cba9e5eab to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/9e361c813e434d32a4ced37cba9e5eab 2024-11-12T19:33:41,972 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/3d84ea62c1ae41bbb9063c2977671c3d to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/3d84ea62c1ae41bbb9063c2977671c3d 2024-11-12T19:33:41,975 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/4b101ccb8ed44855b308c01fbc1949f8 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/4b101ccb8ed44855b308c01fbc1949f8 2024-11-12T19:33:41,983 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/2d6f034f0d1a421b83b2368d5047a4b8 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/2d6f034f0d1a421b83b2368d5047a4b8 2024-11-12T19:33:41,991 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/88736840755147b290bde9e4d68b8258 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/88736840755147b290bde9e4d68b8258 2024-11-12T19:33:41,993 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/f95f23a082224ec5ac7012392675339c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/f95f23a082224ec5ac7012392675339c 2024-11-12T19:33:41,996 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/55c85fb5caf14a989dfb14f1abe62d7b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/55c85fb5caf14a989dfb14f1abe62d7b 2024-11-12T19:33:41,997 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/140a9fe6e3b5432e926c60617533ae32 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/140a9fe6e3b5432e926c60617533ae32 2024-11-12T19:33:41,999 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/b5e1bbd97b6d47dc9ff0a4b4cf6c07b0 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/b5e1bbd97b6d47dc9ff0a4b4cf6c07b0 2024-11-12T19:33:42,001 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/14812155b45249d3aa578f1cd8cffd42 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/14812155b45249d3aa578f1cd8cffd42 2024-11-12T19:33:42,003 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/69c4edd6449a4ce085eb3d7b6db51709 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/69c4edd6449a4ce085eb3d7b6db51709 2024-11-12T19:33:42,004 DEBUG [StoreCloser-TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/504e307221a9488dad073bb6614c0677 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/504e307221a9488dad073bb6614c0677 2024-11-12T19:33:42,009 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/recovered.edits/368.seqid, newMaxSeqId=368, maxSeqId=4 2024-11-12T19:33:42,010 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a. 2024-11-12T19:33:42,010 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1635): Region close journal for 5e91f676554be63c9f656bc420de8a2a: 2024-11-12T19:33:42,011 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(170): Closed 5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,012 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=5e91f676554be63c9f656bc420de8a2a, regionState=CLOSED 2024-11-12T19:33:42,020 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-12T19:33:42,020 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; CloseRegionProcedure 5e91f676554be63c9f656bc420de8a2a, server=81d69e608036,33067,1731439956493 in 1.7180 sec 2024-11-12T19:33:42,021 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=60 2024-11-12T19:33:42,021 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=60, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e91f676554be63c9f656bc420de8a2a, UNASSIGN in 1.7230 sec 2024-11-12T19:33:42,023 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-12T19:33:42,024 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.7270 sec 2024-11-12T19:33:42,026 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731440022025"}]},"ts":"1731440022025"} 2024-11-12T19:33:42,027 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-12T19:33:42,069 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-12T19:33:42,072 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.7910 sec 2024-11-12T19:33:42,211 ERROR [LeaseRenewer:jenkins.hfs.0@localhost:41367 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins.hfs.0@localhost:41367,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:42,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-12T19:33:42,388 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-12T19:33:42,388 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-11-12T19:33:42,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:33:42,390 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=63, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:33:42,391 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=63, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:33:42,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-12T19:33:42,393 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,396 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A, FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B, FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C, FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/recovered.edits] 2024-11-12T19:33:42,398 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/22d5a7a79b7b4b4f97bca23887024570 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/22d5a7a79b7b4b4f97bca23887024570 2024-11-12T19:33:42,400 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/e122407d2dde4e3d88a7c4e07bc67c76 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/A/e122407d2dde4e3d88a7c4e07bc67c76 2024-11-12T19:33:42,400 DEBUG [master/81d69e608036:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 4793b237becb5eefe1e5fde3a3e5b617 changed from -1.0 to 0.0, refreshing cache 2024-11-12T19:33:42,402 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/b26f1f9b382348c2a1d187ef136ddcd2 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/b26f1f9b382348c2a1d187ef136ddcd2 2024-11-12T19:33:42,404 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/b75ab9eefe214178a186df39deabc868 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/B/b75ab9eefe214178a186df39deabc868 2024-11-12T19:33:42,407 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/d13122837cda44d29ce04b257f1fb318 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/d13122837cda44d29ce04b257f1fb318 2024-11-12T19:33:42,408 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/e7963d8b7e5b43ecb0351be32f2e8b84 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/C/e7963d8b7e5b43ecb0351be32f2e8b84 2024-11-12T19:33:42,411 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/recovered.edits/368.seqid to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a/recovered.edits/368.seqid 2024-11-12T19:33:42,412 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,412 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-12T19:33:42,413 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-12T19:33:42,413 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-12T19:33:42,418 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112169b1d3ae0714ee9a2f0a4af6251d3a7_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112169b1d3ae0714ee9a2f0a4af6251d3a7_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,419 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112267977279bd4401eabc1b8acf7240731_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112267977279bd4401eabc1b8acf7240731_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,420 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411123bbef9b2e1a74c7997b95f96e6d54300_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411123bbef9b2e1a74c7997b95f96e6d54300_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,421 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111255e42a8f068243bb8822b3049b2c4744_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111255e42a8f068243bb8822b3049b2c4744_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,423 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411125f5857b2a4f1409f83ade1a8406a96e7_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411125f5857b2a4f1409f83ade1a8406a96e7_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,425 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111269707720ed0a4770ba1eb4d6e8430ac4_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111269707720ed0a4770ba1eb4d6e8430ac4_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,427 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111269ca30cfced249f88218a9c1b293340c_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111269ca30cfced249f88218a9c1b293340c_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,431 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111279ba45bdf9cf416b87e53f583e3b79d8_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111279ba45bdf9cf416b87e53f583e3b79d8_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,433 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411127ba0f788d1aa460690afdfa4aa3c485b_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411127ba0f788d1aa460690afdfa4aa3c485b_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,434 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111282b8e113479649f6bb8f8baa57ee6f40_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111282b8e113479649f6bb8f8baa57ee6f40_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,436 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411128cafaf22ebe2402a93c0f4e3d8267d13_5e91f676554be63c9f656bc420de8a2a to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411128cafaf22ebe2402a93c0f4e3d8267d13_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,439 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112ab9c4dec6b1846239757ecd61bc995e8_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112ab9c4dec6b1846239757ecd61bc995e8_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,441 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112b05a0be4822748a2819a56f555a32141_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112b05a0be4822748a2819a56f555a32141_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,442 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112b0d6fde1f43245da8806785e93b25aa6_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112b0d6fde1f43245da8806785e93b25aa6_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,443 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112b8cb070cb7f74c24adc7b7005b8cd4a6_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112b8cb070cb7f74c24adc7b7005b8cd4a6_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,445 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112bec19271f228402682cc5f723982ec7d_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112bec19271f228402682cc5f723982ec7d_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,447 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112c47d0fe613cc4806852d6caffe8f1183_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112c47d0fe613cc4806852d6caffe8f1183_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,448 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112e9d2975cb97a470da8f49c4f367d54a8_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112e9d2975cb97a470da8f49c4f367d54a8_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,449 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112efb00fd72dc243d3914b643ac3922937_5e91f676554be63c9f656bc420de8a2a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112efb00fd72dc243d3914b643ac3922937_5e91f676554be63c9f656bc420de8a2a 2024-11-12T19:33:42,450 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-12T19:33:42,452 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=63, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:33:42,453 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-12T19:33:42,456 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-12T19:33:42,457 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=63, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:33:42,457 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-12T19:33:42,457 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731440022457"}]},"ts":"9223372036854775807"} 2024-11-12T19:33:42,459 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-12T19:33:42,459 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 5e91f676554be63c9f656bc420de8a2a, NAME => 'TestAcidGuarantees,,1731439993258.5e91f676554be63c9f656bc420de8a2a.', STARTKEY => '', ENDKEY => ''}] 2024-11-12T19:33:42,460 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-12T19:33:42,460 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731440022460"}]},"ts":"9223372036854775807"} 2024-11-12T19:33:42,465 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-12T19:33:42,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-12T19:33:42,504 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=63, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:33:42,504 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 116 msec 2024-11-12T19:33:42,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-12T19:33:42,693 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-12T19:33:42,702 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=240 (was 235) Potentially hanging thread: hconnection-0xf069af2-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xf069af2-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_766517094_22 at /127.0.0.1:56002 [Waiting for operation #217] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/cluster_4e8a161d-4924-73d8-2773-d81d718de17a/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/cluster_4e8a161d-4924-73d8-2773-d81d718de17a/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1271648922_22 at /127.0.0.1:55922 [Waiting for operation #240] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xf069af2-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1271648922_22 at /127.0.0.1:49904 [Waiting for operation #87] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xf069af2-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=461 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1503 (was 1607), ProcessCount=11 (was 11), AvailableMemoryMB=1344 (was 2247) 2024-11-12T19:33:42,711 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=240, OpenFileDescriptor=461, MaxFileDescriptor=1048576, SystemLoadAverage=1503, ProcessCount=11, AvailableMemoryMB=1343 2024-11-12T19:33:42,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-12T19:33:42,712 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T19:33:42,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=64, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-12T19:33:42,714 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T19:33:42,714 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:42,714 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 64 2024-11-12T19:33:42,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-12T19:33:42,715 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T19:33:42,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742034_1210 (size=960) 2024-11-12T19:33:42,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-12T19:33:43,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-12T19:33:43,122 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8 2024-11-12T19:33:43,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742035_1211 (size=53) 2024-11-12T19:33:43,284 ERROR [LeaseRenewer:jenkins@localhost:41367 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins@localhost:41367,5,PEWorkerGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:43,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-12T19:33:43,538 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:33:43,538 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 2056d7413c228b8ad5515802b19e3905, disabling compactions & flushes 2024-11-12T19:33:43,538 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:43,538 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:43,538 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
after waiting 0 ms 2024-11-12T19:33:43,538 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:43,538 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:43,538 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:43,539 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T19:33:43,540 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1731440023539"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731440023539"}]},"ts":"1731440023539"} 2024-11-12T19:33:43,541 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-12T19:33:43,541 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T19:33:43,542 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731440023542"}]},"ts":"1731440023542"} 2024-11-12T19:33:43,543 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-12T19:33:43,595 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2056d7413c228b8ad5515802b19e3905, ASSIGN}] 2024-11-12T19:33:43,596 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2056d7413c228b8ad5515802b19e3905, ASSIGN 2024-11-12T19:33:43,598 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=2056d7413c228b8ad5515802b19e3905, ASSIGN; state=OFFLINE, location=81d69e608036,33067,1731439956493; forceNewPlan=false, retain=false 2024-11-12T19:33:43,748 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=2056d7413c228b8ad5515802b19e3905, regionState=OPENING, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:33:43,750 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; OpenRegionProcedure 2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493}] 2024-11-12T19:33:43,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-12T19:33:43,901 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin 
connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:43,904 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:43,905 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7285): Opening region: {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} 2024-11-12T19:33:43,905 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:43,905 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:33:43,905 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7327): checking encryption for 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:43,905 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7330): checking classloading for 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:43,907 INFO [StoreOpener-2056d7413c228b8ad5515802b19e3905-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:43,908 INFO [StoreOpener-2056d7413c228b8ad5515802b19e3905-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:33:43,909 INFO [StoreOpener-2056d7413c228b8ad5515802b19e3905-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2056d7413c228b8ad5515802b19e3905 columnFamilyName A 2024-11-12T19:33:43,909 DEBUG [StoreOpener-2056d7413c228b8ad5515802b19e3905-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:43,909 INFO [StoreOpener-2056d7413c228b8ad5515802b19e3905-1 {}] regionserver.HStore(327): Store=2056d7413c228b8ad5515802b19e3905/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
2024-11-12T19:33:43,909 INFO [StoreOpener-2056d7413c228b8ad5515802b19e3905-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:43,911 INFO [StoreOpener-2056d7413c228b8ad5515802b19e3905-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:33:43,911 INFO [StoreOpener-2056d7413c228b8ad5515802b19e3905-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2056d7413c228b8ad5515802b19e3905 columnFamilyName B 2024-11-12T19:33:43,911 DEBUG [StoreOpener-2056d7413c228b8ad5515802b19e3905-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:43,912 INFO [StoreOpener-2056d7413c228b8ad5515802b19e3905-1 {}] regionserver.HStore(327): Store=2056d7413c228b8ad5515802b19e3905/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:33:43,912 INFO [StoreOpener-2056d7413c228b8ad5515802b19e3905-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:43,913 INFO [StoreOpener-2056d7413c228b8ad5515802b19e3905-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:33:43,913 INFO [StoreOpener-2056d7413c228b8ad5515802b19e3905-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2056d7413c228b8ad5515802b19e3905 columnFamilyName C 2024-11-12T19:33:43,913 DEBUG [StoreOpener-2056d7413c228b8ad5515802b19e3905-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:33:43,914 INFO [StoreOpener-2056d7413c228b8ad5515802b19e3905-1 {}] regionserver.HStore(327): Store=2056d7413c228b8ad5515802b19e3905/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:33:43,914 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:43,915 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:43,915 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:43,916 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-12T19:33:43,918 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1085): writing seq id for 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:43,921 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T19:33:43,922 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1102): Opened 2056d7413c228b8ad5515802b19e3905; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59430355, jitterRate=-0.11441870033740997}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T19:33:43,923 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1001): Region open journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:43,924 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., pid=66, masterSystemTime=1731440023901 2024-11-12T19:33:43,925 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:43,925 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:33:43,926 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=2056d7413c228b8ad5515802b19e3905, regionState=OPEN, openSeqNum=2, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:33:43,929 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-12T19:33:43,929 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; OpenRegionProcedure 2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 in 177 msec 2024-11-12T19:33:43,931 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-11-12T19:33:43,931 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2056d7413c228b8ad5515802b19e3905, ASSIGN in 335 msec 2024-11-12T19:33:43,933 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T19:33:43,933 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731440023933"}]},"ts":"1731440023933"} 2024-11-12T19:33:43,935 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-12T19:33:43,954 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T19:33:43,955 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2420 sec 2024-11-12T19:33:44,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-12T19:33:44,819 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 64 completed 2024-11-12T19:33:44,821 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x267e0963 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72a7721c 2024-11-12T19:33:44,862 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@faa31c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:44,863 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:44,865 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43336, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:44,867 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T19:33:44,868 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59562, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-12T19:33:44,872 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3eec6530 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7792c763 2024-11-12T19:33:44,884 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22a568ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:44,886 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6dc273c3 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@c8a18c7 2024-11-12T19:33:44,896 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e0e280, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:44,898 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x195206da to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@45426917 2024-11-12T19:33:44,909 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@473477dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:44,911 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x282318cf to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e7fc60d 2024-11-12T19:33:44,926 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a91dc80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:44,928 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ea91426 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e66ea50 2024-11-12T19:33:44,958 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a874cc0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:44,960 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f50b381 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f6119e7 2024-11-12T19:33:44,970 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31178bc2, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:44,973 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x124edab0 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7507573f 2024-11-12T19:33:44,984 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78439bc6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:44,986 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x712d7bc3 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3e5c7476 2024-11-12T19:33:45,003 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a2545d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:45,004 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x40da73c1 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1df84068 2024-11-12T19:33:45,017 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d039dc2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:45,018 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3be398a9 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@644774bd 2024-11-12T19:33:45,034 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15db087a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:33:45,036 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:33:45,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees 2024-11-12T19:33:45,038 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:33:45,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-12T19:33:45,039 DEBUG [hconnection-0x39e36254-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-11-12T19:33:45,039 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:33:45,039 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:33:45,040 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43342, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:45,040 DEBUG [hconnection-0x22ce651f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:45,041 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43344, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:45,043 DEBUG [hconnection-0xb8848d6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:45,044 DEBUG [hconnection-0x2e54904d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:45,044 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43358, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:45,045 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43360, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:45,047 DEBUG [hconnection-0x453c987a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:45,048 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43362, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:45,051 DEBUG [hconnection-0x4703144d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:45,052 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43378, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:45,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:45,066 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-12T19:33:45,067 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:33:45,067 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:45,067 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:33:45,067 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; 
before=1, new segment=null 2024-11-12T19:33:45,067 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:33:45,067 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:45,079 DEBUG [hconnection-0x570aaf5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:45,080 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43386, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:45,099 DEBUG [hconnection-0x7fd4894e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:45,100 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43398, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:45,123 DEBUG [hconnection-0x71a84111-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:45,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,124 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43400, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:45,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440085119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440085119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440085123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440085125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-12T19:33:45,157 DEBUG [hconnection-0x61e357d3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:33:45,159 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43404, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:33:45,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43404 deadline: 1731440085160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,164 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/563394327a2c405d979be5aabb4f7d06 is 50, key is test_row_0/A:col10/1731440025066/Put/seqid=0 2024-11-12T19:33:45,192 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:45,192 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-12T19:33:45,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:45,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:45,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:45,192 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:45,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:45,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:45,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742036_1212 (size=12001) 2024-11-12T19:33:45,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440085227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440085227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440085235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440085236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43404 deadline: 1731440085263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-12T19:33:45,346 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:45,347 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-12T19:33:45,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:45,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:45,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:45,347 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:45,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:45,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:45,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440085432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440085435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440085438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440085442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43404 deadline: 1731440085479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,499 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:45,500 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-12T19:33:45,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:45,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:45,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:45,500 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:45,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:45,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:45,608 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/563394327a2c405d979be5aabb4f7d06 2024-11-12T19:33:45,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-12T19:33:45,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/7c8c9b1276494de7b82515d50e9a8f84 is 50, key is test_row_0/B:col10/1731440025066/Put/seqid=0 2024-11-12T19:33:45,653 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:45,654 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-12T19:33:45,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:45,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:45,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:45,654 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:45,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:45,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:45,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742037_1213 (size=12001) 2024-11-12T19:33:45,736 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440085736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440085743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440085743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440085746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:45,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43404 deadline: 1731440085785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:45,807 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:45,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-12T19:33:45,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:45,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:45,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:45,808 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:45,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:45,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:45,960 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:45,961 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-12T19:33:45,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:45,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:45,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:45,961 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:45,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:45,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:46,087 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/7c8c9b1276494de7b82515d50e9a8f84 2024-11-12T19:33:46,113 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:46,114 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-12T19:33:46,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:46,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:46,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:46,115 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:46,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:46,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:46,125 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/c4f4ddb5bda64042bb3b21f330a87052 is 50, key is test_row_0/C:col10/1731440025066/Put/seqid=0 2024-11-12T19:33:46,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742038_1214 (size=12001) 2024-11-12T19:33:46,131 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/c4f4ddb5bda64042bb3b21f330a87052 2024-11-12T19:33:46,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/563394327a2c405d979be5aabb4f7d06 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/563394327a2c405d979be5aabb4f7d06 2024-11-12T19:33:46,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-12T19:33:46,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/563394327a2c405d979be5aabb4f7d06, entries=150, sequenceid=13, filesize=11.7 K 2024-11-12T19:33:46,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/7c8c9b1276494de7b82515d50e9a8f84 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/7c8c9b1276494de7b82515d50e9a8f84 2024-11-12T19:33:46,151 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/7c8c9b1276494de7b82515d50e9a8f84, entries=150, sequenceid=13, filesize=11.7 K 2024-11-12T19:33:46,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/c4f4ddb5bda64042bb3b21f330a87052 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/c4f4ddb5bda64042bb3b21f330a87052 2024-11-12T19:33:46,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/c4f4ddb5bda64042bb3b21f330a87052, entries=150, sequenceid=13, filesize=11.7 K 2024-11-12T19:33:46,162 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): 
Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 2056d7413c228b8ad5515802b19e3905 in 1096ms, sequenceid=13, compaction requested=false 2024-11-12T19:33:46,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:46,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:46,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-12T19:33:46,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:33:46,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:46,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:33:46,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:46,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:33:46,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:46,252 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/29fe637f2dbc4f6596b76e90096b3658 is 50, key is test_row_0/A:col10/1731440026241/Put/seqid=0 2024-11-12T19:33:46,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:46,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440086253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:46,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:46,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440086254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:46,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:46,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440086255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:46,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:46,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440086254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:46,270 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:46,270 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-12T19:33:46,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:46,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:46,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:46,271 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:46,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:46,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:46,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742039_1215 (size=12001) 2024-11-12T19:33:46,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:46,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43404 deadline: 1731440086288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:46,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:46,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440086357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:46,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:46,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440086357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:46,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:46,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440086357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:46,361 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:46,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440086359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:46,423 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:46,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-12T19:33:46,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:46,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:46,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:46,424 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:46,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:46,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:46,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:46,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440086559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:46,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:46,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440086563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:46,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:46,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:46,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440086563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:46,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440086563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:46,579 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:46,580 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-12T19:33:46,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:46,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:46,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:46,580 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:46,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:46,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:46,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/29fe637f2dbc4f6596b76e90096b3658 2024-11-12T19:33:46,686 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/ab679382c9a74b67ab586367e8c25c07 is 50, key is test_row_0/B:col10/1731440026241/Put/seqid=0 2024-11-12T19:33:46,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742040_1216 (size=12001) 2024-11-12T19:33:46,732 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:46,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-12T19:33:46,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:46,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:46,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:46,735 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:46,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:46,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:46,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:46,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440086863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:46,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:46,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440086866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:46,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:46,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440086868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:46,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:46,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440086869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:46,888 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:46,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-12T19:33:46,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:46,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:46,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:46,889 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:46,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:46,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:47,041 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:47,055 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-12T19:33:47,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:47,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:47,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:47,055 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:47,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:47,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:47,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/ab679382c9a74b67ab586367e8c25c07 2024-11-12T19:33:47,122 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/4622a2409e6145daa09a687cde406b1d is 50, key is test_row_0/C:col10/1731440026241/Put/seqid=0 2024-11-12T19:33:47,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742041_1217 (size=12001) 2024-11-12T19:33:47,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-12T19:33:47,210 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:47,210 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-12T19:33:47,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:47,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:47,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:47,211 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:47,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:47,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:47,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:47,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43404 deadline: 1731440087292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:47,363 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:47,363 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-12T19:33:47,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:47,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:47,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:33:47,364 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:47,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:47,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:47,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:47,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440087366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:47,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:47,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440087373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:47,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:47,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440087373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:47,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:47,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440087374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:47,515 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:47,516 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-12T19:33:47,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:47,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:47,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:47,516 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:47,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:47,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:47,542 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/4622a2409e6145daa09a687cde406b1d 2024-11-12T19:33:47,547 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/29fe637f2dbc4f6596b76e90096b3658 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/29fe637f2dbc4f6596b76e90096b3658 2024-11-12T19:33:47,553 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/29fe637f2dbc4f6596b76e90096b3658, entries=150, sequenceid=38, filesize=11.7 K 2024-11-12T19:33:47,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/ab679382c9a74b67ab586367e8c25c07 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/ab679382c9a74b67ab586367e8c25c07 2024-11-12T19:33:47,560 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/ab679382c9a74b67ab586367e8c25c07, entries=150, sequenceid=38, filesize=11.7 K 2024-11-12T19:33:47,561 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/4622a2409e6145daa09a687cde406b1d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4622a2409e6145daa09a687cde406b1d 2024-11-12T19:33:47,566 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4622a2409e6145daa09a687cde406b1d, entries=150, sequenceid=38, filesize=11.7 K 2024-11-12T19:33:47,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 2056d7413c228b8ad5515802b19e3905 in 1324ms, sequenceid=38, compaction requested=false 2024-11-12T19:33:47,567 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:47,582 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried 
hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-12T19:33:47,668 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:47,668 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-12T19:33:47,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:47,669 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-12T19:33:47,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:33:47,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:47,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:33:47,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:47,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:33:47,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:47,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/bbd0cecd40074543b557d61a279e7885 is 50, key is test_row_0/A:col10/1731440026249/Put/seqid=0 2024-11-12T19:33:47,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742042_1218 (size=12001) 2024-11-12T19:33:47,706 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/bbd0cecd40074543b557d61a279e7885 2024-11-12T19:33:47,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/2f29f12c9c794157b871e3482ff723be is 50, key is test_row_0/B:col10/1731440026249/Put/seqid=0 2024-11-12T19:33:47,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742043_1219 (size=12001) 2024-11-12T19:33:47,732 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/2f29f12c9c794157b871e3482ff723be 2024-11-12T19:33:47,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/3def31eb4642475d9c1add240d114640 is 50, key is test_row_0/C:col10/1731440026249/Put/seqid=0 2024-11-12T19:33:47,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742044_1220 (size=12001) 2024-11-12T19:33:47,771 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/3def31eb4642475d9c1add240d114640 2024-11-12T19:33:47,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/bbd0cecd40074543b557d61a279e7885 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/bbd0cecd40074543b557d61a279e7885 2024-11-12T19:33:47,791 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/bbd0cecd40074543b557d61a279e7885, entries=150, sequenceid=49, filesize=11.7 K 2024-11-12T19:33:47,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/2f29f12c9c794157b871e3482ff723be as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/2f29f12c9c794157b871e3482ff723be 2024-11-12T19:33:47,803 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/2f29f12c9c794157b871e3482ff723be, entries=150, sequenceid=49, filesize=11.7 K 2024-11-12T19:33:47,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/3def31eb4642475d9c1add240d114640 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/3def31eb4642475d9c1add240d114640 2024-11-12T19:33:47,809 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/3def31eb4642475d9c1add240d114640, entries=150, sequenceid=49, filesize=11.7 K 2024-11-12T19:33:47,814 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for 2056d7413c228b8ad5515802b19e3905 in 146ms, sequenceid=49, compaction requested=true 2024-11-12T19:33:47,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:47,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:33:47,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-11-12T19:33:47,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=68 2024-11-12T19:33:47,817 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-12T19:33:47,817 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7760 sec 2024-11-12T19:33:47,819 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees in 2.7820 sec 2024-11-12T19:33:48,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:48,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-12T19:33:48,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:33:48,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:48,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:33:48,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:48,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:33:48,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:48,391 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/fc7d4fb1157e4af8bac77ad423ddd6c7 is 50, key is test_row_0/A:col10/1731440028378/Put/seqid=0 2024-11-12T19:33:48,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742045_1221 (size=12001) 2024-11-12T19:33:48,403 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/fc7d4fb1157e4af8bac77ad423ddd6c7 2024-11-12T19:33:48,414 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/96aafb17fec84cf1b0314dbec39d2819 is 50, key is test_row_0/B:col10/1731440028378/Put/seqid=0 2024-11-12T19:33:48,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:48,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440088419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:48,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:48,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440088424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:48,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:48,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440088424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:48,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:48,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440088424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:48,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742046_1222 (size=12001) 2024-11-12T19:33:48,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:48,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440088531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:48,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:48,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440088531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:48,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:48,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:48,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440088535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:48,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440088535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:48,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:48,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440088736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:48,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:48,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440088737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:48,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:48,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440088743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:48,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:48,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440088744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:48,857 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/96aafb17fec84cf1b0314dbec39d2819 2024-11-12T19:33:48,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/0a26f59694694522a3d9611756a4b85c is 50, key is test_row_0/C:col10/1731440028378/Put/seqid=0 2024-11-12T19:33:48,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742047_1223 (size=12001) 2024-11-12T19:33:48,898 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/0a26f59694694522a3d9611756a4b85c 2024-11-12T19:33:48,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/fc7d4fb1157e4af8bac77ad423ddd6c7 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/fc7d4fb1157e4af8bac77ad423ddd6c7 2024-11-12T19:33:48,916 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/fc7d4fb1157e4af8bac77ad423ddd6c7, entries=150, sequenceid=60, filesize=11.7 K 2024-11-12T19:33:48,918 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/96aafb17fec84cf1b0314dbec39d2819 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/96aafb17fec84cf1b0314dbec39d2819 2024-11-12T19:33:48,924 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/96aafb17fec84cf1b0314dbec39d2819, entries=150, sequenceid=60, filesize=11.7 K 2024-11-12T19:33:48,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/0a26f59694694522a3d9611756a4b85c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/0a26f59694694522a3d9611756a4b85c 2024-11-12T19:33:48,946 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/0a26f59694694522a3d9611756a4b85c, entries=150, sequenceid=60, filesize=11.7 K 2024-11-12T19:33:48,948 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for 2056d7413c228b8ad5515802b19e3905 in 564ms, sequenceid=60, compaction requested=true 2024-11-12T19:33:48,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:48,951 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:33:48,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:33:48,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:48,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:33:48,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:48,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:33:48,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:48,952 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store 
files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:33:48,954 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:33:48,954 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/A is initiating minor compaction (all files) 2024-11-12T19:33:48,954 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/A in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:48,954 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/563394327a2c405d979be5aabb4f7d06, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/29fe637f2dbc4f6596b76e90096b3658, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/bbd0cecd40074543b557d61a279e7885, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/fc7d4fb1157e4af8bac77ad423ddd6c7] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=46.9 K 2024-11-12T19:33:48,955 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:33:48,955 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/B is initiating minor compaction (all files) 2024-11-12T19:33:48,955 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/B in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
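The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") are memstore back-pressure: once a region's memstore grows past its blocking limit, writes are rejected until the flusher catches up. Below is a minimal sketch of the two settings that normally determine that limit, assuming stock HBase configuration keys; the values shown are the usual defaults rather than this test's configuration (the 512.0 K limit here points to a deliberately tiny test override).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    // Illustrative defaults only; the 512.0 K blocking limit in the log above
    // implies a much smaller test-specific override.
    Configuration conf = HBaseConfiguration.create();
    // Size at which a single memstore is flushed to an HFile (default ~128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Multiplier over the flush size beyond which writes are rejected with
    // RegionTooBusyException ("Over memstore limit=...") until flushing catches up.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
  }
}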
2024-11-12T19:33:48,955 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/7c8c9b1276494de7b82515d50e9a8f84, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/ab679382c9a74b67ab586367e8c25c07, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/2f29f12c9c794157b871e3482ff723be, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/96aafb17fec84cf1b0314dbec39d2819] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=46.9 K 2024-11-12T19:33:48,956 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c8c9b1276494de7b82515d50e9a8f84, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731440025052 2024-11-12T19:33:48,956 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 563394327a2c405d979be5aabb4f7d06, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731440025052 2024-11-12T19:33:48,957 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab679382c9a74b67ab586367e8c25c07, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1731440025109 2024-11-12T19:33:48,958 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 29fe637f2dbc4f6596b76e90096b3658, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1731440025109 2024-11-12T19:33:48,958 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f29f12c9c794157b871e3482ff723be, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1731440026244 2024-11-12T19:33:48,958 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting bbd0cecd40074543b557d61a279e7885, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1731440026244 2024-11-12T19:33:48,959 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96aafb17fec84cf1b0314dbec39d2819, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1731440028378 2024-11-12T19:33:48,959 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting fc7d4fb1157e4af8bac77ad423ddd6c7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1731440028378 2024-11-12T19:33:48,985 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#A#compaction#177 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:48,985 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/a29264486d3640a5b416603a59a166c7 is 50, key is test_row_0/A:col10/1731440028378/Put/seqid=0 2024-11-12T19:33:49,003 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#B#compaction#178 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:49,004 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/e1ed743c73c34f17bc1df4ade133ae40 is 50, key is test_row_0/B:col10/1731440028378/Put/seqid=0 2024-11-12T19:33:49,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-12T19:33:49,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:33:49,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:49,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:33:49,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:49,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:33:49,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:49,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:49,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742049_1225 (size=12139) 2024-11-12T19:33:49,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742048_1224 (size=12139) 2024-11-12T19:33:49,069 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/a0fc1b462ea84a2d9f559ed106a7a982 is 50, key is test_row_0/A:col10/1731440028423/Put/seqid=0 2024-11-12T19:33:49,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:49,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440089070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:49,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:49,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440089074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:49,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:49,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440089084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:49,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:49,089 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/e1ed743c73c34f17bc1df4ade133ae40 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/e1ed743c73c34f17bc1df4ade133ae40 2024-11-12T19:33:49,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440089085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:49,107 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/a29264486d3640a5b416603a59a166c7 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/a29264486d3640a5b416603a59a166c7 2024-11-12T19:33:49,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742050_1226 (size=14341) 2024-11-12T19:33:49,119 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/a0fc1b462ea84a2d9f559ed106a7a982 2024-11-12T19:33:49,121 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/B of 2056d7413c228b8ad5515802b19e3905 into e1ed743c73c34f17bc1df4ade133ae40(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:33:49,121 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:49,121 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/B, priority=12, startTime=1731440028952; duration=0sec 2024-11-12T19:33:49,121 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:49,121 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:B 2024-11-12T19:33:49,124 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:33:49,131 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:33:49,131 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/C is initiating minor compaction (all files) 2024-11-12T19:33:49,131 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/C in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:49,132 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/c4f4ddb5bda64042bb3b21f330a87052, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4622a2409e6145daa09a687cde406b1d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/3def31eb4642475d9c1add240d114640, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/0a26f59694694522a3d9611756a4b85c] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=46.9 K 2024-11-12T19:33:49,133 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4f4ddb5bda64042bb3b21f330a87052, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731440025052 2024-11-12T19:33:49,133 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4622a2409e6145daa09a687cde406b1d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1731440025109 2024-11-12T19:33:49,134 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3def31eb4642475d9c1add240d114640, keycount=150, bloomtype=ROW, size=11.7 K, 
encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1731440026244 2024-11-12T19:33:49,134 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a26f59694694522a3d9611756a4b85c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1731440028378 2024-11-12T19:33:49,136 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/A of 2056d7413c228b8ad5515802b19e3905 into a29264486d3640a5b416603a59a166c7(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:49,136 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:49,136 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/A, priority=12, startTime=1731440028951; duration=0sec 2024-11-12T19:33:49,136 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:49,136 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:A 2024-11-12T19:33:49,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-12T19:33:49,147 INFO [Thread-1007 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-11-12T19:33:49,152 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:33:49,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-11-12T19:33:49,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-12T19:33:49,161 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:33:49,161 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#C#compaction#180 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:49,162 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/a807fcc86e344abe9754fae2291d2740 is 50, key is test_row_0/C:col10/1731440028378/Put/seqid=0 2024-11-12T19:33:49,165 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:33:49,165 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:33:49,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:49,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440089187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:49,191 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:49,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440089189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:49,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:49,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440089189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:49,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:49,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440089191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:49,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/882eab8c227e42d39df6e9efb00e922a is 50, key is test_row_0/B:col10/1731440028423/Put/seqid=0 2024-11-12T19:33:49,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742051_1227 (size=12139) 2024-11-12T19:33:49,233 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/a807fcc86e344abe9754fae2291d2740 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/a807fcc86e344abe9754fae2291d2740 2024-11-12T19:33:49,246 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/C of 2056d7413c228b8ad5515802b19e3905 into a807fcc86e344abe9754fae2291d2740(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
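The compaction entries above (SortedCompactionPolicy selecting from "4 store files, 0 compacting, 4 eligible, 16 blocking" and ExploringCompactionPolicy picking all 4) are driven by per-store file-count thresholds. The following is a hedged sketch of the settings that commonly govern that selection, assuming standard configuration keys and their usual defaults; none of these values are taken from this test's site configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum store files before a minor compaction is considered (default 3).
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on files merged in a single compaction (default 10).
    conf.setInt("hbase.hstore.compaction.max", 10);
    // "16 blocking" in the selection log matches this default: beyond 16 store
    // files, further flushes are delayed until compaction reduces the count.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
  }
}

With the usual defaults, a handful of similarly sized flushed HFiles is enough to make an all-files minor compaction eligible, which is consistent with the selections logged for stores A, B and C.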
2024-11-12T19:33:49,246 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:49,246 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/C, priority=12, startTime=1731440028952; duration=0sec 2024-11-12T19:33:49,246 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:49,246 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:C 2024-11-12T19:33:49,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-12T19:33:49,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742052_1228 (size=12001) 2024-11-12T19:33:49,260 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/882eab8c227e42d39df6e9efb00e922a 2024-11-12T19:33:49,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/b953a9cf691c4baebb8b442f5374ba22 is 50, key is test_row_0/C:col10/1731440028423/Put/seqid=0 2024-11-12T19:33:49,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:49,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43404 deadline: 1731440089318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:49,324 DEBUG [Thread-1005 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4250 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., hostname=81d69e608036,33067,1731439956493, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:33:49,330 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:49,333 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-12T19:33:49,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
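The RpcRetryingCallerImpl entry above ("tries=6, retries=16, started=4250 ms ago") shows the client library itself absorbing the RegionTooBusyException and retrying the put against the busy region. Below is a minimal sketch of the writer side of such a loop, assuming the standard HBase 2.x client API; the class name and the retry/pause values are illustrative, while the table, row, family and qualifier names are taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // How many times, and with what base pause, the built-in RpcRetryingCaller
    // retries before surfacing RegionTooBusyException to the caller (illustrative).
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100L);
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Retried internally while the region is over its memstore limit.
      table.put(put);
    }
  }
}

If the retries are exhausted the exception is rethrown to the caller; in the log above the call is still being retried (attempt 6 of 16).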
2024-11-12T19:33:49,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:49,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:49,333 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:49,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:49,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:49,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742053_1229 (size=12001) 2024-11-12T19:33:49,353 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/b953a9cf691c4baebb8b442f5374ba22 2024-11-12T19:33:49,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/a0fc1b462ea84a2d9f559ed106a7a982 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/a0fc1b462ea84a2d9f559ed106a7a982 2024-11-12T19:33:49,400 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:49,400 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/a0fc1b462ea84a2d9f559ed106a7a982, entries=200, sequenceid=88, filesize=14.0 K 2024-11-12T19:33:49,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440089392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:49,400 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:49,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440089392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:49,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/882eab8c227e42d39df6e9efb00e922a as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/882eab8c227e42d39df6e9efb00e922a 2024-11-12T19:33:49,409 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/882eab8c227e42d39df6e9efb00e922a, entries=150, sequenceid=88, filesize=11.7 K 2024-11-12T19:33:49,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/b953a9cf691c4baebb8b442f5374ba22 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b953a9cf691c4baebb8b442f5374ba22 2024-11-12T19:33:49,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:49,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440089401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:49,420 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b953a9cf691c4baebb8b442f5374ba22, entries=150, sequenceid=88, filesize=11.7 K 2024-11-12T19:33:49,427 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=46.96 KB/48090 for 2056d7413c228b8ad5515802b19e3905 in 381ms, sequenceid=88, compaction requested=false 2024-11-12T19:33:49,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:49,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:49,434 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-12T19:33:49,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:33:49,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:49,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:33:49,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:49,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:33:49,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:49,456 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/01ce0cf6e7e744ee8adb46c5d8e66698 is 50, key is test_row_0/A:col10/1731440029066/Put/seqid=0 2024-11-12T19:33:49,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-12T19:33:49,491 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:49,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-12T19:33:49,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:49,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:49,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:49,492 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:49,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:49,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:49,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742054_1230 (size=12001) 2024-11-12T19:33:49,498 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/01ce0cf6e7e744ee8adb46c5d8e66698 2024-11-12T19:33:49,528 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/6119d70657c34f43ab9576cd16ab00cd is 50, key is test_row_0/B:col10/1731440029066/Put/seqid=0 2024-11-12T19:33:49,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742055_1231 (size=12001) 2024-11-12T19:33:49,580 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/6119d70657c34f43ab9576cd16ab00cd 2024-11-12T19:33:49,628 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/adc2b80fd13f49d38d849963b1a18aec is 50, key is test_row_0/C:col10/1731440029066/Put/seqid=0 2024-11-12T19:33:49,648 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:49,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742056_1232 (size=12001) 2024-11-12T19:33:49,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-12T19:33:49,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:49,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:49,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:33:49,656 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:49,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:49,659 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/adc2b80fd13f49d38d849963b1a18aec 2024-11-12T19:33:49,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:49,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:49,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440089679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:49,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/01ce0cf6e7e744ee8adb46c5d8e66698 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/01ce0cf6e7e744ee8adb46c5d8e66698 2024-11-12T19:33:49,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:49,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440089703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:49,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:49,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440089707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:49,728 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/01ce0cf6e7e744ee8adb46c5d8e66698, entries=150, sequenceid=102, filesize=11.7 K 2024-11-12T19:33:49,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:49,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440089728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:49,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/6119d70657c34f43ab9576cd16ab00cd as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/6119d70657c34f43ab9576cd16ab00cd 2024-11-12T19:33:49,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-12T19:33:49,771 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/6119d70657c34f43ab9576cd16ab00cd, entries=150, sequenceid=102, filesize=11.7 K 2024-11-12T19:33:49,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/adc2b80fd13f49d38d849963b1a18aec as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/adc2b80fd13f49d38d849963b1a18aec 2024-11-12T19:33:49,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:49,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440089798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:49,812 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:49,813 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/adc2b80fd13f49d38d849963b1a18aec, entries=150, sequenceid=102, filesize=11.7 K 2024-11-12T19:33:49,815 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-12T19:33:49,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:49,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:49,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:49,815 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:49,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:49,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:49,816 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 2056d7413c228b8ad5515802b19e3905 in 382ms, sequenceid=102, compaction requested=true 2024-11-12T19:33:49,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:49,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:33:49,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:49,816 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:49,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:33:49,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:49,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:33:49,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-12T19:33:49,816 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:49,828 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:49,828 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/A is initiating minor compaction (all files) 2024-11-12T19:33:49,828 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/A in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:33:49,828 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/a29264486d3640a5b416603a59a166c7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/a0fc1b462ea84a2d9f559ed106a7a982, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/01ce0cf6e7e744ee8adb46c5d8e66698] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=37.6 K 2024-11-12T19:33:49,828 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:49,828 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/B is initiating minor compaction (all files) 2024-11-12T19:33:49,828 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/B in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:49,828 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/e1ed743c73c34f17bc1df4ade133ae40, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/882eab8c227e42d39df6e9efb00e922a, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/6119d70657c34f43ab9576cd16ab00cd] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=35.3 K 2024-11-12T19:33:49,831 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting e1ed743c73c34f17bc1df4ade133ae40, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1731440028378 2024-11-12T19:33:49,831 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting a29264486d3640a5b416603a59a166c7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1731440028378 2024-11-12T19:33:49,833 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 882eab8c227e42d39df6e9efb00e922a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1731440028411 2024-11-12T19:33:49,833 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting a0fc1b462ea84a2d9f559ed106a7a982, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1731440028411 2024-11-12T19:33:49,835 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 6119d70657c34f43ab9576cd16ab00cd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1731440029066 2024-11-12T19:33:49,835 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01ce0cf6e7e744ee8adb46c5d8e66698, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1731440029066 2024-11-12T19:33:49,881 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#B#compaction#186 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:49,881 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/02c0a87c93474e8bb56e5e1e37819086 is 50, key is test_row_0/B:col10/1731440029066/Put/seqid=0 2024-11-12T19:33:49,890 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#A#compaction#187 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:49,893 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/8486c2ed5a4f4e088264f7f7a341571b is 50, key is test_row_0/A:col10/1731440029066/Put/seqid=0 2024-11-12T19:33:49,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742057_1233 (size=12241) 2024-11-12T19:33:49,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742058_1234 (size=12241) 2024-11-12T19:33:49,969 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:49,971 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-12T19:33:49,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:33:49,973 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-12T19:33:49,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:33:49,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:49,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:33:49,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:49,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:33:49,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:49,974 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/02c0a87c93474e8bb56e5e1e37819086 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/02c0a87c93474e8bb56e5e1e37819086 2024-11-12T19:33:49,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/4b4739fe7a384dafae478747a2993eca is 50, key is test_row_0/A:col10/1731440029667/Put/seqid=0 2024-11-12T19:33:50,020 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/B of 2056d7413c228b8ad5515802b19e3905 into 02c0a87c93474e8bb56e5e1e37819086(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:50,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
as already flushing 2024-11-12T19:33:50,020 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:50,020 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/B, priority=13, startTime=1731440029816; duration=0sec 2024-11-12T19:33:50,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:50,020 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:50,020 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:B 2024-11-12T19:33:50,021 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:50,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742059_1235 (size=12001) 2024-11-12T19:33:50,033 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:50,033 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/C is initiating minor compaction (all files) 2024-11-12T19:33:50,033 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/C in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:33:50,033 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/a807fcc86e344abe9754fae2291d2740, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b953a9cf691c4baebb8b442f5374ba22, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/adc2b80fd13f49d38d849963b1a18aec] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=35.3 K 2024-11-12T19:33:50,035 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/4b4739fe7a384dafae478747a2993eca 2024-11-12T19:33:50,037 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting a807fcc86e344abe9754fae2291d2740, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1731440028378 2024-11-12T19:33:50,041 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting b953a9cf691c4baebb8b442f5374ba22, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1731440028411 2024-11-12T19:33:50,043 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting adc2b80fd13f49d38d849963b1a18aec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1731440029066 2024-11-12T19:33:50,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/29c501b02c134832a8b0909edb620961 is 50, key is test_row_0/B:col10/1731440029667/Put/seqid=0 2024-11-12T19:33:50,121 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#C#compaction#190 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:50,121 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/3d994e13dc094399afb108ff59d9a771 is 50, key is test_row_0/C:col10/1731440029066/Put/seqid=0 2024-11-12T19:33:50,136 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:50,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440090131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:50,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742060_1236 (size=12001) 2024-11-12T19:33:50,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742061_1237 (size=12241) 2024-11-12T19:33:50,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:50,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440090218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:50,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:50,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440090223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:50,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:50,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440090243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:50,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:50,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440090246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:50,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-12T19:33:50,403 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/8486c2ed5a4f4e088264f7f7a341571b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/8486c2ed5a4f4e088264f7f7a341571b 2024-11-12T19:33:50,457 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/A of 2056d7413c228b8ad5515802b19e3905 into 8486c2ed5a4f4e088264f7f7a341571b(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:50,457 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:50,457 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/A, priority=13, startTime=1731440029816; duration=0sec 2024-11-12T19:33:50,457 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:50,457 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:A 2024-11-12T19:33:50,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:50,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440090455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:50,575 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/29c501b02c134832a8b0909edb620961 2024-11-12T19:33:50,627 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/3d994e13dc094399afb108ff59d9a771 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/3d994e13dc094399afb108ff59d9a771 2024-11-12T19:33:50,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/bd2c8368d47b4e36a2d8ffebc9db75c6 is 50, key is test_row_0/C:col10/1731440029667/Put/seqid=0 2024-11-12T19:33:50,698 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/C of 2056d7413c228b8ad5515802b19e3905 into 3d994e13dc094399afb108ff59d9a771(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:33:50,698 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:50,698 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/C, priority=13, startTime=1731440029816; duration=0sec 2024-11-12T19:33:50,699 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:50,699 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:C 2024-11-12T19:33:50,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742062_1238 (size=12001) 2024-11-12T19:33:50,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:50,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440090767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:51,127 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/bd2c8368d47b4e36a2d8ffebc9db75c6 2024-11-12T19:33:51,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/4b4739fe7a384dafae478747a2993eca as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/4b4739fe7a384dafae478747a2993eca 2024-11-12T19:33:51,190 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/4b4739fe7a384dafae478747a2993eca, entries=150, sequenceid=127, filesize=11.7 K 2024-11-12T19:33:51,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/29c501b02c134832a8b0909edb620961 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/29c501b02c134832a8b0909edb620961 2024-11-12T19:33:51,201 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/29c501b02c134832a8b0909edb620961, entries=150, sequenceid=127, filesize=11.7 K 2024-11-12T19:33:51,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/bd2c8368d47b4e36a2d8ffebc9db75c6 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/bd2c8368d47b4e36a2d8ffebc9db75c6 2024-11-12T19:33:51,209 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/bd2c8368d47b4e36a2d8ffebc9db75c6, entries=150, sequenceid=127, filesize=11.7 K 2024-11-12T19:33:51,210 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 2056d7413c228b8ad5515802b19e3905 in 1237ms, sequenceid=127, compaction requested=false 2024-11-12T19:33:51,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:51,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:51,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-11-12T19:33:51,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-11-12T19:33:51,214 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-12T19:33:51,215 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0470 sec 2024-11-12T19:33:51,216 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 2.0630 sec 2024-11-12T19:33:51,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:51,238 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-12T19:33:51,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:33:51,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:51,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:33:51,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:51,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:33:51,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:51,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/ea1c2b9a7a2e4188b2d66dfea1fa7b65 is 50, key is test_row_0/A:col10/1731440031236/Put/seqid=0 2024-11-12T19:33:51,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-12T19:33:51,263 INFO [Thread-1007 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-11-12T19:33:51,264 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:33:51,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-12T19:33:51,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-12T19:33:51,268 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:33:51,271 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:33:51,271 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:33:51,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742063_1239 (size=12151) 2024-11-12T19:33:51,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:51,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440091287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:51,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:51,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440091290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:51,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:51,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440091290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:51,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:51,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440091291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:51,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-12T19:33:51,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:51,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440091395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:51,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:51,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440091395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:51,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:51,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440091398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:51,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:51,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440091412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:51,427 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:51,431 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-12T19:33:51,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:51,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:51,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:51,431 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:51,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:51,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:51,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-12T19:33:51,589 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:51,589 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-12T19:33:51,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:51,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:51,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:51,590 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:51,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:51,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:51,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:51,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440091603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:51,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:51,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440091603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:51,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:51,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440091603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:51,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:51,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440091615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:51,679 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=143 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/ea1c2b9a7a2e4188b2d66dfea1fa7b65 2024-11-12T19:33:51,712 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/352d75fcd3cf4763b470365a8ac0b93c is 50, key is test_row_0/B:col10/1731440031236/Put/seqid=0 2024-11-12T19:33:51,742 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:51,742 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-12T19:33:51,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:51,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:51,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:51,747 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:51,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:51,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:51,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742064_1240 (size=12151) 2024-11-12T19:33:51,760 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=143 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/352d75fcd3cf4763b470365a8ac0b93c 2024-11-12T19:33:51,787 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/0a740b701f1f4e219e65293d620a6175 is 50, key is test_row_0/C:col10/1731440031236/Put/seqid=0 2024-11-12T19:33:51,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742065_1241 (size=12151) 2024-11-12T19:33:51,811 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=143 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/0a740b701f1f4e219e65293d620a6175 2024-11-12T19:33:51,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/ea1c2b9a7a2e4188b2d66dfea1fa7b65 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/ea1c2b9a7a2e4188b2d66dfea1fa7b65 2024-11-12T19:33:51,879 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-12T19:33:51,909 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:51,910 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-12T19:33:51,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:51,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:51,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:51,912 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:51,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:51,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:51,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440091910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:51,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:51,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:51,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440091911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:51,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:51,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440091914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:51,937 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:51,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440091933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:51,943 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/ea1c2b9a7a2e4188b2d66dfea1fa7b65, entries=150, sequenceid=143, filesize=11.9 K 2024-11-12T19:33:51,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/352d75fcd3cf4763b470365a8ac0b93c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/352d75fcd3cf4763b470365a8ac0b93c 2024-11-12T19:33:51,973 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/352d75fcd3cf4763b470365a8ac0b93c, entries=150, sequenceid=143, filesize=11.9 K 2024-11-12T19:33:51,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/0a740b701f1f4e219e65293d620a6175 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/0a740b701f1f4e219e65293d620a6175 2024-11-12T19:33:51,987 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/0a740b701f1f4e219e65293d620a6175, entries=150, sequenceid=143, filesize=11.9 K 2024-11-12T19:33:51,988 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for 2056d7413c228b8ad5515802b19e3905 in 749ms, sequenceid=143, 
compaction requested=true 2024-11-12T19:33:51,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:51,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:33:51,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:51,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:33:51,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-12T19:33:51,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:33:51,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-12T19:33:51,988 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:51,988 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:51,991 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:51,992 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/A is initiating minor compaction (all files) 2024-11-12T19:33:51,992 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/A in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
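
The entries above show the master repeatedly re-dispatching the remote flush (pid=72) while the region reports "NOT flushing ... as already flushing", and client mutations being rejected with RegionTooBusyException until the flush finally completes ("Finished flush of dataSize ~67.09 KB ... in 749ms", roughly 22.36 KB per column family across A, B and C). A minimal client-side sketch of riding out those busy-region rejections follows; the class name, retry count and backoff values are hypothetical and not taken from the test, and depending on client retry settings the busy-region error may surface wrapped in a RetriesExhausted* exception rather than directly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper: retries a single Put while the region reports it is
// over its memstore blocking limit, backing off so the in-flight flush can drain.
public final class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/qualifier mirror the log ("test_row_0", family A, col10).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      putWithRetry(table, put, 5, 200L);
    }
  }

  static void putWithRetry(Table table, Put put, int maxAttempts, long backoffMs)
      throws Exception {
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);                      // the client also retries internally
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e;                           // give up after a few attempts
        }
        Thread.sleep(backoffMs * attempt);   // linear backoff while the flush completes
      }
    }
  }
}
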
2024-11-12T19:33:51,992 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/8486c2ed5a4f4e088264f7f7a341571b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/4b4739fe7a384dafae478747a2993eca, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/ea1c2b9a7a2e4188b2d66dfea1fa7b65] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=35.5 K 2024-11-12T19:33:51,992 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:51,992 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/C is initiating minor compaction (all files) 2024-11-12T19:33:51,992 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/C in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:51,992 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/3d994e13dc094399afb108ff59d9a771, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/bd2c8368d47b4e36a2d8ffebc9db75c6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/0a740b701f1f4e219e65293d620a6175] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=35.5 K 2024-11-12T19:33:51,993 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8486c2ed5a4f4e088264f7f7a341571b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1731440029066 2024-11-12T19:33:51,993 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b4739fe7a384dafae478747a2993eca, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731440029633 2024-11-12T19:33:51,993 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d994e13dc094399afb108ff59d9a771, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1731440029066 2024-11-12T19:33:51,994 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea1c2b9a7a2e4188b2d66dfea1fa7b65, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1731440030081 2024-11-12T19:33:51,995 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] 
compactions.Compactor(224): Compacting bd2c8368d47b4e36a2d8ffebc9db75c6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731440029633 2024-11-12T19:33:51,996 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a740b701f1f4e219e65293d620a6175, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1731440030081 2024-11-12T19:33:52,009 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#A#compaction#195 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:52,009 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/c84a0d8c959947898a36393abb5f6f7e is 50, key is test_row_0/A:col10/1731440031236/Put/seqid=0 2024-11-12T19:33:52,013 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#C#compaction#196 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:52,013 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/6f7509f72aec4597af9b7584eeaa89fa is 50, key is test_row_0/C:col10/1731440031236/Put/seqid=0 2024-11-12T19:33:52,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742067_1243 (size=12493) 2024-11-12T19:33:52,035 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/6f7509f72aec4597af9b7584eeaa89fa as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/6f7509f72aec4597af9b7584eeaa89fa 2024-11-12T19:33:52,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742066_1242 (size=12493) 2024-11-12T19:33:52,050 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/C of 2056d7413c228b8ad5515802b19e3905 into 6f7509f72aec4597af9b7584eeaa89fa(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
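
The selection logged above ("Exploring compaction algorithm has selected 3 files of size 36393 ... 1 permutations with 1 in ratio") merges three flush files of about 12.0 K, 11.7 K and 11.9 K into a single ~12.2 K file per store. The heart of that size-based selection is a ratio test: a candidate set is only acceptable if no file is larger than the sum of the other files times the configured ratio (hbase.hstore.compaction.ratio, default 1.2). Below is a simplified illustration, not the actual ExploringCompactionPolicy code; the byte values are chosen only so they sum to the logged 36393.

import java.util.List;

// Sketch of the "files in ratio" check used by HBase's size-based compaction
// selection: reject the set if any file exceeds (sum of the others) * ratio.
final class CompactionRatioSketch {
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;   // this file dwarfs the rest; skip this candidate set
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Illustrative sizes summing to the 36393 bytes reported for the three flush files.
    List<Long> candidate = List.of(12_262L, 11_980L, 12_151L);
    System.out.println("in ratio: " + filesInRatio(candidate, 1.2));  // prints true
  }
}
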
2024-11-12T19:33:52,050 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:52,050 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/C, priority=13, startTime=1731440031988; duration=0sec 2024-11-12T19:33:52,050 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:52,050 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:C 2024-11-12T19:33:52,050 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:52,052 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:52,052 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/B is initiating minor compaction (all files) 2024-11-12T19:33:52,052 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/B in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:52,052 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/02c0a87c93474e8bb56e5e1e37819086, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/29c501b02c134832a8b0909edb620961, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/352d75fcd3cf4763b470365a8ac0b93c] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=35.5 K 2024-11-12T19:33:52,052 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 02c0a87c93474e8bb56e5e1e37819086, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1731440029066 2024-11-12T19:33:52,053 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 29c501b02c134832a8b0909edb620961, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731440029633 2024-11-12T19:33:52,053 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 352d75fcd3cf4763b470365a8ac0b93c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1731440030081 2024-11-12T19:33:52,070 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
81d69e608036,33067,1731439956493 2024-11-12T19:33:52,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-12T19:33:52,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:52,071 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-12T19:33:52,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:33:52,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:52,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:33:52,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:52,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:33:52,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:52,073 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#B#compaction#197 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:52,073 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/6a318b88e728467ebd01d2d69b49ca94 is 50, key is test_row_0/B:col10/1731440031236/Put/seqid=0 2024-11-12T19:33:52,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/7c26b9ecc66b48148252698704ed7f3e is 50, key is test_row_0/A:col10/1731440031287/Put/seqid=0 2024-11-12T19:33:52,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742069_1245 (size=12151) 2024-11-12T19:33:52,093 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/7c26b9ecc66b48148252698704ed7f3e 2024-11-12T19:33:52,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742068_1244 (size=12493) 2024-11-12T19:33:52,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/1206ccb9f3144c7eb6bfd068095644e6 is 50, key is test_row_0/B:col10/1731440031287/Put/seqid=0 2024-11-12T19:33:52,114 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/6a318b88e728467ebd01d2d69b49ca94 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/6a318b88e728467ebd01d2d69b49ca94 2024-11-12T19:33:52,122 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/B of 2056d7413c228b8ad5515802b19e3905 into 6a318b88e728467ebd01d2d69b49ca94(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
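
The "Over memstore limit=512.0 K" warnings that bracket this flush come from HRegion.checkResources(): once a region's memstore exceeds its blocking size (the memstore flush size multiplied by the block multiplier), new mutations are rejected with RegionTooBusyException until flushing catches up. A 512 K limit implies the test runs with a far smaller flush size than the 128 MB production default. The sketch below only assumes one combination (128 K x 4) that would produce that limit; the property names are the standard HBase keys, the values are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: how the blocking limit seen in the log could arise from configuration.
// blockingMemStoreSize = memstore flush size * block multiplier.
public final class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test-style values; the production defaults are 128 MB and 4.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;   // 512 K with the values above
    System.out.println("blocking memstore limit = " + blockingLimit / 1024 + " K");
  }
}
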
2024-11-12T19:33:52,122 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:52,122 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/B, priority=13, startTime=1731440031988; duration=0sec 2024-11-12T19:33:52,122 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:52,122 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:B 2024-11-12T19:33:52,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742070_1246 (size=12151) 2024-11-12T19:33:52,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-12T19:33:52,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:52,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:52,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:52,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440092440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:52,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:52,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440092443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:52,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:52,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440092443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:52,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:52,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440092444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:52,460 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/c84a0d8c959947898a36393abb5f6f7e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/c84a0d8c959947898a36393abb5f6f7e 2024-11-12T19:33:52,472 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/A of 2056d7413c228b8ad5515802b19e3905 into c84a0d8c959947898a36393abb5f6f7e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:52,472 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:52,472 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/A, priority=13, startTime=1731440031988; duration=0sec 2024-11-12T19:33:52,473 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:52,473 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:A 2024-11-12T19:33:52,544 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/1206ccb9f3144c7eb6bfd068095644e6 2024-11-12T19:33:52,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:52,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440092548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:52,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:52,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:52,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440092548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:52,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440092548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:52,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/eb7acee2d05f4c4e9f63414f7a57206f is 50, key is test_row_0/C:col10/1731440031287/Put/seqid=0 2024-11-12T19:33:52,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742071_1247 (size=12151) 2024-11-12T19:33:52,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:52,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440092750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:52,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:52,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440092751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:52,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:52,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440092754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:52,994 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/eb7acee2d05f4c4e9f63414f7a57206f 2024-11-12T19:33:52,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/7c26b9ecc66b48148252698704ed7f3e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/7c26b9ecc66b48148252698704ed7f3e 2024-11-12T19:33:53,004 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/7c26b9ecc66b48148252698704ed7f3e, entries=150, sequenceid=169, filesize=11.9 K 2024-11-12T19:33:53,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/1206ccb9f3144c7eb6bfd068095644e6 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/1206ccb9f3144c7eb6bfd068095644e6 2024-11-12T19:33:53,009 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/1206ccb9f3144c7eb6bfd068095644e6, entries=150, sequenceid=169, filesize=11.9 K 2024-11-12T19:33:53,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/eb7acee2d05f4c4e9f63414f7a57206f as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/eb7acee2d05f4c4e9f63414f7a57206f 2024-11-12T19:33:53,038 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/eb7acee2d05f4c4e9f63414f7a57206f, entries=150, sequenceid=169, filesize=11.9 K 2024-11-12T19:33:53,039 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 2056d7413c228b8ad5515802b19e3905 in 968ms, sequenceid=169, compaction requested=false 2024-11-12T19:33:53,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:53,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:33:53,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-12T19:33:53,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-12T19:33:53,045 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-12T19:33:53,045 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7700 sec 2024-11-12T19:33:53,047 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.7820 sec 2024-11-12T19:33:53,062 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-12T19:33:53,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:53,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:33:53,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:53,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:33:53,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:53,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:33:53,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:53,068 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/7316216055ae41498cf6e5a4dd2b84bc is 50, key is test_row_0/A:col10/1731440032436/Put/seqid=0 2024-11-12T19:33:53,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742072_1248 (size=14541) 2024-11-12T19:33:53,108 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/7316216055ae41498cf6e5a4dd2b84bc 2024-11-12T19:33:53,118 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/6bf35f21e27142949f95258df160c6b7 is 50, key is test_row_0/B:col10/1731440032436/Put/seqid=0 2024-11-12T19:33:53,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:53,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440093123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:53,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:53,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440093124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:53,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:53,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440093126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:53,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742073_1249 (size=12151) 2024-11-12T19:33:53,143 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/6bf35f21e27142949f95258df160c6b7 2024-11-12T19:33:53,171 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/9ef267fcfeb44b1e97ff5d492630de91 is 50, key is test_row_0/C:col10/1731440032436/Put/seqid=0 2024-11-12T19:33:53,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742074_1250 (size=12151) 2024-11-12T19:33:53,213 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/9ef267fcfeb44b1e97ff5d492630de91 2024-11-12T19:33:53,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/7316216055ae41498cf6e5a4dd2b84bc as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/7316216055ae41498cf6e5a4dd2b84bc 2024-11-12T19:33:53,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/7316216055ae41498cf6e5a4dd2b84bc, entries=200, sequenceid=185, filesize=14.2 K 2024-11-12T19:33:53,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/6bf35f21e27142949f95258df160c6b7 as 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/6bf35f21e27142949f95258df160c6b7 2024-11-12T19:33:53,233 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/6bf35f21e27142949f95258df160c6b7, entries=150, sequenceid=185, filesize=11.9 K 2024-11-12T19:33:53,234 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/9ef267fcfeb44b1e97ff5d492630de91 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/9ef267fcfeb44b1e97ff5d492630de91 2024-11-12T19:33:53,240 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/9ef267fcfeb44b1e97ff5d492630de91, entries=150, sequenceid=185, filesize=11.9 K 2024-11-12T19:33:53,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 2056d7413c228b8ad5515802b19e3905 in 181ms, sequenceid=185, compaction requested=true 2024-11-12T19:33:53,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:53,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:33:53,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:53,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:33:53,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:53,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:33:53,243 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:53,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-12T19:33:53,245 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39185 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:53,245 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] 
regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/A is initiating minor compaction (all files) 2024-11-12T19:33:53,245 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/A in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:53,245 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/c84a0d8c959947898a36393abb5f6f7e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/7c26b9ecc66b48148252698704ed7f3e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/7316216055ae41498cf6e5a4dd2b84bc] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=38.3 K 2024-11-12T19:33:53,246 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting c84a0d8c959947898a36393abb5f6f7e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1731440030081 2024-11-12T19:33:53,246 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c26b9ecc66b48148252698704ed7f3e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1731440031280 2024-11-12T19:33:53,246 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:53,247 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7316216055ae41498cf6e5a4dd2b84bc, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1731440032422 2024-11-12T19:33:53,251 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:53,251 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/B is initiating minor compaction (all files) 2024-11-12T19:33:53,251 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/B in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:33:53,251 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/6a318b88e728467ebd01d2d69b49ca94, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/1206ccb9f3144c7eb6bfd068095644e6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/6bf35f21e27142949f95258df160c6b7] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=35.9 K 2024-11-12T19:33:53,251 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a318b88e728467ebd01d2d69b49ca94, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1731440030081 2024-11-12T19:33:53,252 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 1206ccb9f3144c7eb6bfd068095644e6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1731440031280 2024-11-12T19:33:53,253 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 6bf35f21e27142949f95258df160c6b7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1731440032422 2024-11-12T19:33:53,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:53,259 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-12T19:33:53,260 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#A#compaction#204 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:53,261 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/55f63bac7a464269a6e23ce5335d095f is 50, key is test_row_0/A:col10/1731440032436/Put/seqid=0 2024-11-12T19:33:53,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:33:53,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:53,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:33:53,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:53,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:33:53,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:53,271 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#B#compaction#205 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:53,272 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/f257381a1cd2409a891ad45d695d0330 is 50, key is test_row_0/B:col10/1731440032436/Put/seqid=0 2024-11-12T19:33:53,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:53,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440093279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:53,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:53,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440093280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:53,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:53,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440093282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:53,286 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/66f93090fbca43b38d4ec3940b369550 is 50, key is test_row_0/A:col10/1731440033258/Put/seqid=0 2024-11-12T19:33:53,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742075_1251 (size=12595) 2024-11-12T19:33:53,346 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/55f63bac7a464269a6e23ce5335d095f as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/55f63bac7a464269a6e23ce5335d095f 2024-11-12T19:33:53,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:53,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43404 deadline: 1731440093347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:53,351 DEBUG [Thread-1005 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8277 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., hostname=81d69e608036,33067,1731439956493, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:33:53,353 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/A of 2056d7413c228b8ad5515802b19e3905 into 55f63bac7a464269a6e23ce5335d095f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
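The RpcRetryingCallerImpl entries above (tries=7, retries=16, started=8277 ms ago) show the client retry loop absorbing RegionTooBusyException until the region unblocks. A minimal client-side sketch follows, assuming an HBase client on the classpath; the table and row names come from the test itself, while the retry count and pause values are illustrative assumptions mirroring the log, not settings read from the test's code.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Retry budget mirroring the log (retries=16); the pause is an assumed value.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100L);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      try {
        table.put(put);
      } catch (IOException e) {
        // A busy-region failure that outlives the retry budget surfaces here as an IOException.
        System.err.println("Put failed after retries: " + e.getMessage());
      }
    }
  }
}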
2024-11-12T19:33:53,353 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:53,353 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/A, priority=13, startTime=1731440033243; duration=0sec 2024-11-12T19:33:53,353 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:53,353 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:A 2024-11-12T19:33:53,353 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:53,355 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:53,355 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/C is initiating minor compaction (all files) 2024-11-12T19:33:53,356 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/C in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:53,356 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/6f7509f72aec4597af9b7584eeaa89fa, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/eb7acee2d05f4c4e9f63414f7a57206f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/9ef267fcfeb44b1e97ff5d492630de91] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=35.9 K 2024-11-12T19:33:53,357 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f7509f72aec4597af9b7584eeaa89fa, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1731440030081 2024-11-12T19:33:53,357 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb7acee2d05f4c4e9f63414f7a57206f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1731440031280 2024-11-12T19:33:53,357 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ef267fcfeb44b1e97ff5d492630de91, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1731440032422 2024-11-12T19:33:53,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43501 is added to blk_1073742076_1252 (size=12595) 2024-11-12T19:33:53,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742077_1253 (size=14541) 2024-11-12T19:33:53,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-12T19:33:53,381 INFO [Thread-1007 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-12T19:33:53,382 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#C#compaction#207 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:53,382 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/4d5e1fc91db64b74843c2595899cabee is 50, key is test_row_0/C:col10/1731440032436/Put/seqid=0 2024-11-12T19:33:53,384 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:33:53,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-12T19:33:53,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-12T19:33:53,386 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:33:53,387 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:33:53,387 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:33:53,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:53,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440093385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:53,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:53,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440093386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:53,390 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/f257381a1cd2409a891ad45d695d0330 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/f257381a1cd2409a891ad45d695d0330 2024-11-12T19:33:53,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:53,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440093389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:53,400 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/B of 2056d7413c228b8ad5515802b19e3905 into f257381a1cd2409a891ad45d695d0330(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:53,400 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:53,400 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/B, priority=13, startTime=1731440033243; duration=0sec 2024-11-12T19:33:53,400 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:53,400 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:B 2024-11-12T19:33:53,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742078_1254 (size=12595) 2024-11-12T19:33:53,431 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/4d5e1fc91db64b74843c2595899cabee as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4d5e1fc91db64b74843c2595899cabee 2024-11-12T19:33:53,442 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/C of 2056d7413c228b8ad5515802b19e3905 into 4d5e1fc91db64b74843c2595899cabee(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
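The pid=71 and pid=73 entries above correspond to client-requested flushes of TestAcidGuarantees that the master turns into FlushTableProcedure runs, while the compaction threads service the selections made by ExploringCompactionPolicy. As a hedged sketch only (the connection setup is assumed; only the table name comes from the log), the equivalent Admin calls look like this:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.flush(table);    // master stores a FlushTableProcedure, as for pid=71/73 above
      admin.compact(table);  // queues minor compactions like the ones completed above
    }
  }
}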
2024-11-12T19:33:53,442 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:53,442 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/C, priority=13, startTime=1731440033243; duration=0sec 2024-11-12T19:33:53,442 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:53,442 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:C 2024-11-12T19:33:53,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:53,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440093450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:53,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-12T19:33:53,543 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:53,544 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-12T19:33:53,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:53,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:53,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:53,545 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
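The repeated "Over memstore limit=512.0 K" warnings above come from HRegion.checkResources blocking writes once the per-region memstore exceeds its blocking limit, which is the flush size multiplied by the block multiplier; the concurrent "Unable to complete flush ... as already flushing" errors show the flush procedure being retried while that flush is still in progress. The values below are assumptions chosen only because their product reproduces the 512 K limit seen in the log; they are not read from the test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush threshold, assumed here to be 128 KB.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Writes block once the memstore reaches flush.size * multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    System.out.println("Blocking limit: " + blockingLimit + " bytes"); // 524288 bytes = 512.0 K
  }
}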
2024-11-12T19:33:53,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:53,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:53,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:53,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440093593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:53,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:53,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440093599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:53,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:53,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440093603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:53,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-12T19:33:53,706 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:53,706 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-12T19:33:53,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:53,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:53,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:53,707 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
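The pid=74 failures above are reported back to the master, which re-dispatches the FlushRegionCallable until the in-flight flush completes. On the client side, writers see the same pressure as RegionTooBusyException; a small application-level backoff loop around Table.put, shown below as a sketch with an assumed delay schedule (it is not the test's own logic), is one common way to ride out such bursts.

import java.io.IOException;

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.client.Table;

public final class BackoffPutSketch {
  private BackoffPutSketch() {}

  /** Retries a put with exponential backoff while the region reports it is too busy. */
  public static void putWithBackoff(Table table, Put put, int maxAttempts)
      throws IOException, InterruptedException {
    long sleepMs = 50L;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException | RetriesExhaustedException e) {
        // Depending on client retry settings, the busy signal can surface as either type.
        if (attempt >= maxAttempts) {
          throw e;
        }
        Thread.sleep(sleepMs);              // back off before retrying
        sleepMs = Math.min(sleepMs * 2, 5_000L);
      }
    }
  }
}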
2024-11-12T19:33:53,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:53,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:53,783 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/66f93090fbca43b38d4ec3940b369550 2024-11-12T19:33:53,802 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/90dc3958016e48de99b1d21ed9468dbd is 50, key is test_row_0/B:col10/1731440033258/Put/seqid=0 2024-11-12T19:33:53,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742079_1255 (size=12151) 2024-11-12T19:33:53,847 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/90dc3958016e48de99b1d21ed9468dbd 2024-11-12T19:33:53,861 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:53,862 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-12T19:33:53,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/96d305ddbd3b457cb76171023ab28fd1 is 50, key is test_row_0/C:col10/1731440033258/Put/seqid=0 2024-11-12T19:33:53,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:53,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:53,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:53,867 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:53,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:53,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:53,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:53,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440093896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:53,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742080_1256 (size=12151) 2024-11-12T19:33:53,903 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/96d305ddbd3b457cb76171023ab28fd1 2024-11-12T19:33:53,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:53,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440093905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:53,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:53,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440093908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:53,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/66f93090fbca43b38d4ec3940b369550 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/66f93090fbca43b38d4ec3940b369550 2024-11-12T19:33:53,929 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/66f93090fbca43b38d4ec3940b369550, entries=200, sequenceid=209, filesize=14.2 K 2024-11-12T19:33:53,930 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/90dc3958016e48de99b1d21ed9468dbd as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/90dc3958016e48de99b1d21ed9468dbd 2024-11-12T19:33:53,937 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/90dc3958016e48de99b1d21ed9468dbd, entries=150, sequenceid=209, filesize=11.9 K 2024-11-12T19:33:53,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/96d305ddbd3b457cb76171023ab28fd1 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/96d305ddbd3b457cb76171023ab28fd1 2024-11-12T19:33:53,952 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/96d305ddbd3b457cb76171023ab28fd1, entries=150, sequenceid=209, filesize=11.9 K 2024-11-12T19:33:53,953 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 
KB/61830 for 2056d7413c228b8ad5515802b19e3905 in 694ms, sequenceid=209, compaction requested=false 2024-11-12T19:33:53,953 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:54,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-12T19:33:54,022 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:54,022 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-12T19:33:54,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:54,022 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-12T19:33:54,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:33:54,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:54,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:33:54,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:54,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:33:54,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:54,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/16b773112e594fef83e4734031b48f75 is 50, key is test_row_0/A:col10/1731440033273/Put/seqid=0 2024-11-12T19:33:54,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742081_1257 (size=12151) 2024-11-12T19:33:54,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:54,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
as already flushing 2024-11-12T19:33:54,461 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=224 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/16b773112e594fef83e4734031b48f75 2024-11-12T19:33:54,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/30a5b0263cc741619d882e6b19b455d3 is 50, key is test_row_0/B:col10/1731440033273/Put/seqid=0 2024-11-12T19:33:54,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:54,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440094476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:54,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:54,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440094476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:54,482 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:54,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440094479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:54,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-12T19:33:54,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742082_1258 (size=12151) 2024-11-12T19:33:54,539 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=224 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/30a5b0263cc741619d882e6b19b455d3 2024-11-12T19:33:54,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/065c4b7ad09b481d888b4ca644d85230 is 50, key is test_row_0/C:col10/1731440033273/Put/seqid=0 2024-11-12T19:33:54,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:54,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440094582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:54,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:54,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440094582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:54,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742083_1259 (size=12151) 2024-11-12T19:33:54,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:54,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440094599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:54,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:54,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440094788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:54,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:54,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440094788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:54,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:54,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440094811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:55,003 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=224 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/065c4b7ad09b481d888b4ca644d85230 2024-11-12T19:33:55,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/16b773112e594fef83e4734031b48f75 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/16b773112e594fef83e4734031b48f75 2024-11-12T19:33:55,036 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/16b773112e594fef83e4734031b48f75, entries=150, sequenceid=224, filesize=11.9 K 2024-11-12T19:33:55,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/30a5b0263cc741619d882e6b19b455d3 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/30a5b0263cc741619d882e6b19b455d3 2024-11-12T19:33:55,044 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/30a5b0263cc741619d882e6b19b455d3, entries=150, sequenceid=224, filesize=11.9 K 2024-11-12T19:33:55,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/065c4b7ad09b481d888b4ca644d85230 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/065c4b7ad09b481d888b4ca644d85230 2024-11-12T19:33:55,055 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/065c4b7ad09b481d888b4ca644d85230, entries=150, sequenceid=224, filesize=11.9 K 2024-11-12T19:33:55,056 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 2056d7413c228b8ad5515802b19e3905 in 1033ms, sequenceid=224, compaction requested=true 2024-11-12T19:33:55,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:55,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:55,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-12T19:33:55,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-12T19:33:55,058 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-12T19:33:55,058 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6690 sec 2024-11-12T19:33:55,059 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.6740 sec 2024-11-12T19:33:55,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:55,116 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-12T19:33:55,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:33:55,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:55,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:33:55,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:55,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:33:55,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:55,120 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/354292babc214e2cb0000de8849a6a2c is 50, key is test_row_0/A:col10/1731440034445/Put/seqid=0 2024-11-12T19:33:55,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742084_1260 (size=12151) 2024-11-12T19:33:55,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:55,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:55,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440095129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:55,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440095129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:55,137 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:55,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440095133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:55,241 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:55,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440095238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:55,241 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:55,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440095239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:55,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:55,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440095243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:55,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:55,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440095442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:55,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:55,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440095443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:55,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:55,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440095449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:55,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:55,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440095457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:55,462 DEBUG [Thread-999 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., hostname=81d69e608036,33067,1731439956493, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:33:55,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-12T19:33:55,515 INFO [Thread-1007 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-12T19:33:55,521 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:33:55,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-12T19:33:55,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-12T19:33:55,527 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:33:55,528 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:33:55,528 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:33:55,535 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/354292babc214e2cb0000de8849a6a2c 2024-11-12T19:33:55,569 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/502ad8ba7db0403989eafb5c27cbbe59 is 50, key is test_row_0/B:col10/1731440034445/Put/seqid=0 2024-11-12T19:33:55,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742085_1261 (size=12151) 2024-11-12T19:33:55,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/502ad8ba7db0403989eafb5c27cbbe59 2024-11-12T19:33:55,623 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/b41972da923c48bf959b1790a71e2575 is 50, key is test_row_0/C:col10/1731440034445/Put/seqid=0 2024-11-12T19:33:55,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-12T19:33:55,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742086_1262 (size=12151) 2024-11-12T19:33:55,650 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/b41972da923c48bf959b1790a71e2575 2024-11-12T19:33:55,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/354292babc214e2cb0000de8849a6a2c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/354292babc214e2cb0000de8849a6a2c 2024-11-12T19:33:55,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/354292babc214e2cb0000de8849a6a2c, entries=150, sequenceid=250, filesize=11.9 K 2024-11-12T19:33:55,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/502ad8ba7db0403989eafb5c27cbbe59 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/502ad8ba7db0403989eafb5c27cbbe59 2024-11-12T19:33:55,683 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:55,685 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-12T19:33:55,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:55,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:55,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:55,685 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:55,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:55,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:55,691 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/502ad8ba7db0403989eafb5c27cbbe59, entries=150, sequenceid=250, filesize=11.9 K 2024-11-12T19:33:55,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/b41972da923c48bf959b1790a71e2575 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b41972da923c48bf959b1790a71e2575 2024-11-12T19:33:55,699 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b41972da923c48bf959b1790a71e2575, entries=150, sequenceid=250, filesize=11.9 K 2024-11-12T19:33:55,700 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 2056d7413c228b8ad5515802b19e3905 in 584ms, sequenceid=250, compaction requested=true 2024-11-12T19:33:55,700 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:55,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:33:55,701 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:33:55,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:55,701 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:33:55,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:33:55,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:55,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:33:55,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:55,702 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51438 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:33:55,703 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/A is initiating minor compaction (all files) 2024-11-12T19:33:55,703 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/A in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:55,703 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/55f63bac7a464269a6e23ce5335d095f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/66f93090fbca43b38d4ec3940b369550, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/16b773112e594fef83e4734031b48f75, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/354292babc214e2cb0000de8849a6a2c] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=50.2 K 2024-11-12T19:33:55,703 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:33:55,703 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/B is initiating minor compaction (all files) 2024-11-12T19:33:55,703 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/B in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:33:55,704 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/f257381a1cd2409a891ad45d695d0330, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/90dc3958016e48de99b1d21ed9468dbd, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/30a5b0263cc741619d882e6b19b455d3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/502ad8ba7db0403989eafb5c27cbbe59] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=47.9 K 2024-11-12T19:33:55,705 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55f63bac7a464269a6e23ce5335d095f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1731440032422 2024-11-12T19:33:55,705 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting f257381a1cd2409a891ad45d695d0330, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1731440032422 2024-11-12T19:33:55,706 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66f93090fbca43b38d4ec3940b369550, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1731440033114 2024-11-12T19:33:55,706 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 90dc3958016e48de99b1d21ed9468dbd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1731440033114 2024-11-12T19:33:55,707 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 16b773112e594fef83e4734031b48f75, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1731440033273 2024-11-12T19:33:55,707 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 30a5b0263cc741619d882e6b19b455d3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1731440033273 2024-11-12T19:33:55,708 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 502ad8ba7db0403989eafb5c27cbbe59, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1731440034445 2024-11-12T19:33:55,708 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 354292babc214e2cb0000de8849a6a2c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1731440034445 2024-11-12T19:33:55,744 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#B#compaction#216 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:55,745 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/eb7df9ac45ca4c2fb8643b7243d4a060 is 50, key is test_row_0/B:col10/1731440034445/Put/seqid=0 2024-11-12T19:33:55,753 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#A#compaction#217 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:55,753 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/552b917d8f2c40d5a7d331816ccb4a98 is 50, key is test_row_0/A:col10/1731440034445/Put/seqid=0 2024-11-12T19:33:55,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:55,767 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-12T19:33:55,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:33:55,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:55,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:33:55,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:55,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:33:55,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:55,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742087_1263 (size=12731) 2024-11-12T19:33:55,789 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/eb7df9ac45ca4c2fb8643b7243d4a060 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/eb7df9ac45ca4c2fb8643b7243d4a060 2024-11-12T19:33:55,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/71255dd027b14619804cbfb53c5b53b0 is 50, key is test_row_0/A:col10/1731440035754/Put/seqid=0 2024-11-12T19:33:55,794 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 
2056d7413c228b8ad5515802b19e3905/B of 2056d7413c228b8ad5515802b19e3905 into eb7df9ac45ca4c2fb8643b7243d4a060(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:55,794 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:55,794 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/B, priority=12, startTime=1731440035701; duration=0sec 2024-11-12T19:33:55,795 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:55,795 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:B 2024-11-12T19:33:55,795 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:33:55,797 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:33:55,798 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/C is initiating minor compaction (all files) 2024-11-12T19:33:55,798 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/C in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:33:55,798 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4d5e1fc91db64b74843c2595899cabee, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/96d305ddbd3b457cb76171023ab28fd1, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/065c4b7ad09b481d888b4ca644d85230, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b41972da923c48bf959b1790a71e2575] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=47.9 K 2024-11-12T19:33:55,798 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d5e1fc91db64b74843c2595899cabee, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1731440032422 2024-11-12T19:33:55,798 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 96d305ddbd3b457cb76171023ab28fd1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1731440033114 2024-11-12T19:33:55,799 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 065c4b7ad09b481d888b4ca644d85230, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1731440033273 2024-11-12T19:33:55,800 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting b41972da923c48bf959b1790a71e2575, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1731440034445 2024-11-12T19:33:55,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742088_1264 (size=12731) 2024-11-12T19:33:55,826 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/552b917d8f2c40d5a7d331816ccb4a98 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/552b917d8f2c40d5a7d331816ccb4a98 2024-11-12T19:33:55,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-12T19:33:55,836 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/A of 2056d7413c228b8ad5515802b19e3905 into 552b917d8f2c40d5a7d331816ccb4a98(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:33:55,836 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:55,836 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/A, priority=12, startTime=1731440035700; duration=0sec 2024-11-12T19:33:55,836 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:55,837 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:A 2024-11-12T19:33:55,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742089_1265 (size=17181) 2024-11-12T19:33:55,840 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#C#compaction#219 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:55,841 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/7a81437737964b48976615c5764470da is 50, key is test_row_0/C:col10/1731440034445/Put/seqid=0 2024-11-12T19:33:55,847 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:55,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-12T19:33:55,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:55,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:55,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:55,848 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:55,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:55,851 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/71255dd027b14619804cbfb53c5b53b0 2024-11-12T19:33:55,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:55,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:55,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440095858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:55,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:55,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440095855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:55,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:55,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440095861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:55,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742090_1266 (size=12731) 2024-11-12T19:33:55,878 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/323b80984db74ae080d7a7fa4c4d8358 is 50, key is test_row_0/B:col10/1731440035754/Put/seqid=0 2024-11-12T19:33:55,882 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/7a81437737964b48976615c5764470da as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/7a81437737964b48976615c5764470da 2024-11-12T19:33:55,891 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/C of 2056d7413c228b8ad5515802b19e3905 into 7a81437737964b48976615c5764470da(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:33:55,891 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:55,891 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/C, priority=12, startTime=1731440035701; duration=0sec 2024-11-12T19:33:55,891 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:55,891 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:C 2024-11-12T19:33:55,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742091_1267 (size=12301) 2024-11-12T19:33:55,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:55,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440095968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:55,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:55,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440095971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:55,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:55,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440095975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:56,011 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:56,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-12T19:33:56,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:56,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:56,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:56,015 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:56,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:56,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:56,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-12T19:33:56,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:56,174 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:56,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440096172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:56,175 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-12T19:33:56,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:56,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:56,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:33:56,175 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:56,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:56,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:33:56,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:33:56,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440096177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
2024-11-12T19:33:56,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:33:56,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440096182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
2024-11-12T19:33:56,309 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/323b80984db74ae080d7a7fa4c4d8358
2024-11-12T19:33:56,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/a64fe4fffad046a09a5e4c05a8e99ef8 is 50, key is test_row_0/C:col10/1731440035754/Put/seqid=0
2024-11-12T19:33:56,331 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:33:56,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76
2024-11-12T19:33:56,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.
2024-11-12T19:33:56,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing
2024-11-12T19:33:56,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.
2024-11-12T19:33:56,332 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76
java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:33:56,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76
java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:33:56,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=76
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:33:56,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742092_1268 (size=12301)
2024-11-12T19:33:56,358 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/a64fe4fffad046a09a5e4c05a8e99ef8
2024-11-12T19:33:56,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/71255dd027b14619804cbfb53c5b53b0 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/71255dd027b14619804cbfb53c5b53b0
2024-11-12T19:33:56,394 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/71255dd027b14619804cbfb53c5b53b0, entries=250, sequenceid=263, filesize=16.8 K
2024-11-12T19:33:56,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/323b80984db74ae080d7a7fa4c4d8358 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/323b80984db74ae080d7a7fa4c4d8358
2024-11-12T19:33:56,408 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/323b80984db74ae080d7a7fa4c4d8358, entries=150, sequenceid=263, filesize=12.0 K
2024-11-12T19:33:56,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/a64fe4fffad046a09a5e4c05a8e99ef8 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/a64fe4fffad046a09a5e4c05a8e99ef8
2024-11-12T19:33:56,415 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/a64fe4fffad046a09a5e4c05a8e99ef8, entries=150, sequenceid=263, filesize=12.0 K
2024-11-12T19:33:56,416 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 2056d7413c228b8ad5515802b19e3905 in 650ms, sequenceid=263, compaction requested=false
2024-11-12T19:33:56,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905:
2024-11-12T19:33:56,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905
2024-11-12T19:33:56,482 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-11-12T19:33:56,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A
2024-11-12T19:33:56,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:33:56,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B
2024-11-12T19:33:56,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:33:56,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C
2024-11-12T19:33:56,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:33:56,486 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:33:56,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76
2024-11-12T19:33:56,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.
2024-11-12T19:33:56,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing
2024-11-12T19:33:56,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.
2024-11-12T19:33:56,487 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76
java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:33:56,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76
java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:33:56,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=76
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:33:56,489 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/76c39709948348b3bad7f05952f96e69 is 50, key is test_row_0/A:col10/1731440036480/Put/seqid=0
2024-11-12T19:33:56,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:33:56,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:33:56,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440096494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
2024-11-12T19:33:56,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440096495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
2024-11-12T19:33:56,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:33:56,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440096496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
2024-11-12T19:33:56,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742093_1269 (size=12301)
2024-11-12T19:33:56,503 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/76c39709948348b3bad7f05952f96e69
2024-11-12T19:33:56,510 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/30edcad58c5141d2b2e49fbb3b028dde is 50, key is test_row_0/B:col10/1731440036480/Put/seqid=0
2024-11-12T19:33:56,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742094_1270 (size=12301)
2024-11-12T19:33:56,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:33:56,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440096598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
2024-11-12T19:33:56,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:33:56,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440096599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
2024-11-12T19:33:56,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:33:56,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440096600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
2024-11-12T19:33:56,639 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:33:56,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75
2024-11-12T19:33:56,641 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76
2024-11-12T19:33:56,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.
2024-11-12T19:33:56,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing
2024-11-12T19:33:56,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.
2024-11-12T19:33:56,641 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76
java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:33:56,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76
java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:33:56,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=76
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:33:56,794 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:33:56,795 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76
2024-11-12T19:33:56,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.
2024-11-12T19:33:56,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing
2024-11-12T19:33:56,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.
2024-11-12T19:33:56,796 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76
java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:33:56,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76
java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:33:56,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:33:56,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440096802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
2024-11-12T19:33:56,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=76
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:33:56,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:33:56,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440096805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
2024-11-12T19:33:56,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:33:56,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440096814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493
2024-11-12T19:33:56,919 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/30edcad58c5141d2b2e49fbb3b028dde
2024-11-12T19:33:56,940 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/18c0239373d04bb9bad0970a06ecbd91 is 50, key is test_row_0/C:col10/1731440036480/Put/seqid=0
2024-11-12T19:33:56,955 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:33:56,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76
2024-11-12T19:33:56,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.
2024-11-12T19:33:56,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing
2024-11-12T19:33:56,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.
2024-11-12T19:33:56,963 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76
java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:33:56,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76
java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:33:56,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=76
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:57,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742095_1271 (size=12301) 2024-11-12T19:33:57,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:57,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440097118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:57,122 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:57,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440097118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:57,125 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:57,125 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-12T19:33:57,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
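The repeated "Over memstore limit=512.0 K" rejections above come from the region's blocking memstore size. Assuming the stock configuration keys, that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; a 512 K figure implies this test runs with a far smaller flush size than the 128 MB default. A minimal sketch of how the two knobs combine (the values shown are the usual defaults, not this test's settings):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Defaults: 128 MB flush size x multiplier 4 => writes block once a region
        // holds 512 MB of memstore. The test above clearly uses a much smaller
        // flush size, which is why its blocking limit is only 512.0 K.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking memstore size = " + blockingLimit + " bytes");
      }
    }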
2024-11-12T19:33:57,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:57,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:57,126 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:57,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:33:57,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:57,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:57,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440097130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:57,278 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:57,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-12T19:33:57,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:57,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:57,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:57,279 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
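While the region sits over that blocking limit, each incoming Mutate (callId 180, 143, 150, 182 above) is answered with RegionTooBusyException, and the caller is expected to back off and retry until the flush drains the memstore. The stock client does this retrying internally; the following is only a hand-written sketch of the same pattern, assuming the standard 2.x client API, with MAX_ATTEMPTS and BACKOFF_MS as made-up illustrative values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      private static final int MAX_ATTEMPTS = 5;    // illustrative, not an HBase setting
      private static final long BACKOFF_MS = 200L;  // illustrative, not an HBase setting

      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);             // the client itself also retries busy regions
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= MAX_ATTEMPTS) {
                throw e;                  // give up once the explicit budget is spent
              }
              Thread.sleep(BACKOFF_MS * attempt);  // simple linear backoff
            }
          }
        }
      }
    }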
2024-11-12T19:33:57,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:57,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:57,401 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/18c0239373d04bb9bad0970a06ecbd91 2024-11-12T19:33:57,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/76c39709948348b3bad7f05952f96e69 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/76c39709948348b3bad7f05952f96e69 2024-11-12T19:33:57,416 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/76c39709948348b3bad7f05952f96e69, entries=150, sequenceid=291, filesize=12.0 K 2024-11-12T19:33:57,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/30edcad58c5141d2b2e49fbb3b028dde as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/30edcad58c5141d2b2e49fbb3b028dde 2024-11-12T19:33:57,423 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/30edcad58c5141d2b2e49fbb3b028dde, entries=150, sequenceid=291, filesize=12.0 K 2024-11-12T19:33:57,425 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/18c0239373d04bb9bad0970a06ecbd91 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/18c0239373d04bb9bad0970a06ecbd91 2024-11-12T19:33:57,431 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:57,432 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/18c0239373d04bb9bad0970a06ecbd91, entries=150, sequenceid=291, filesize=12.0 K 2024-11-12T19:33:57,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-12T19:33:57,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region 
operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:57,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:57,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:57,432 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:57,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
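The pid=76 entries are the master's flush procedure repeatedly re-dispatching FlushRegionCallable to the region server, which keeps answering "already flushing"; the procedure simply retries until the in-flight flush completes. From the outside, that exchange is what a table-level flush request looks like. A minimal sketch of issuing such a request, assuming the standard Admin API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the master to flush every region of the table; the master drives
          // this through a procedure and re-sends the per-region flush callable
          // if the region server reports that a flush is already in progress.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }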
2024-11-12T19:33:57,432 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 2056d7413c228b8ad5515802b19e3905 in 950ms, sequenceid=291, compaction requested=true 2024-11-12T19:33:57,432 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:57,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:33:57,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:57,433 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:57,433 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:57,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:57,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:33:57,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:57,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:33:57,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:57,435 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:57,435 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42213 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:57,435 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/B is initiating minor compaction (all files) 2024-11-12T19:33:57,435 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/A is initiating minor compaction (all files) 2024-11-12T19:33:57,435 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/B in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:57,435 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/A in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
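The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" lines reflect the ratio test at the heart of the exploring policy: a window of adjacent store files is only considered if no file in it is larger than the combined size of the other files multiplied by the compaction ratio (hbase.hstore.compaction.ratio, 1.2 by default). The following is a simplified, illustrative version of that check, not the actual ExploringCompactionPolicy code; the helper name and example sizes are assumptions:

    import java.util.List;

    public class CompactionRatioSketch {
      // Simplified "in ratio" test: every file in the candidate window must be no
      // larger than the combined size of the other files multiplied by the ratio.
      static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > (total - size) * ratio) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Three store files of roughly 12 K each, like the B selection above.
        List<Long> window = List.of(12_700L, 12_300L, 12_300L);
        System.out.println(filesInRatio(window, 1.2)); // prints true: eligible window
      }
    }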
2024-11-12T19:33:57,435 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/552b917d8f2c40d5a7d331816ccb4a98, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/71255dd027b14619804cbfb53c5b53b0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/76c39709948348b3bad7f05952f96e69] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=41.2 K 2024-11-12T19:33:57,435 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/eb7df9ac45ca4c2fb8643b7243d4a060, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/323b80984db74ae080d7a7fa4c4d8358, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/30edcad58c5141d2b2e49fbb3b028dde] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=36.5 K 2024-11-12T19:33:57,436 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting eb7df9ac45ca4c2fb8643b7243d4a060, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1731440034445 2024-11-12T19:33:57,436 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 552b917d8f2c40d5a7d331816ccb4a98, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1731440034445 2024-11-12T19:33:57,436 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 323b80984db74ae080d7a7fa4c4d8358, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1731440035754 2024-11-12T19:33:57,436 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71255dd027b14619804cbfb53c5b53b0, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1731440035128 2024-11-12T19:33:57,437 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 30edcad58c5141d2b2e49fbb3b028dde, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1731440035850 2024-11-12T19:33:57,437 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 76c39709948348b3bad7f05952f96e69, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1731440035850 2024-11-12T19:33:57,456 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#A#compaction#226 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:57,457 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/5f5d9763adee416ab41b0e549714f351 is 50, key is test_row_0/A:col10/1731440036480/Put/seqid=0 2024-11-12T19:33:57,457 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#B#compaction#225 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:57,458 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/912c9489745348eea6570b5708f07e37 is 50, key is test_row_0/B:col10/1731440036480/Put/seqid=0 2024-11-12T19:33:57,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742096_1272 (size=12983) 2024-11-12T19:33:57,472 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/5f5d9763adee416ab41b0e549714f351 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/5f5d9763adee416ab41b0e549714f351 2024-11-12T19:33:57,477 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/A of 2056d7413c228b8ad5515802b19e3905 into 5f5d9763adee416ab41b0e549714f351(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
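The PressureAwareThroughputController lines ("average throughput is 6.55 MB/second ... total limit is 50.00 MB/second") show compaction writes being metered against a throughput ceiling; the controller only sleeps when the cumulative write rate would exceed the current limit, which is why it reports "slept 0 time(s)" here. A deliberately simplified illustration of that style of throttle follows; it is not the real controller, and the class and field names are invented:

    public class ThroughputThrottleSketch {
      private final double limitBytesPerSec;  // e.g. the 50.00 MB/second limit above
      private final long startNanos = System.nanoTime();
      private long bytesWritten;

      ThroughputThrottleSketch(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
      }

      // Called after each chunk written during a compaction: sleep just long enough
      // to keep the cumulative write rate at or below the limit; if the compaction
      // is already slower than the limit, the computed sleep is <= 0 and is skipped.
      void control(long chunkBytes) throws InterruptedException {
        bytesWritten += chunkBytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double minSecondsForBytes = bytesWritten / limitBytesPerSec;
        long sleepMs = (long) ((minSecondsForBytes - elapsedSec) * 1000.0);
        if (sleepMs > 0) {
          Thread.sleep(sleepMs);
        }
      }
    }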
2024-11-12T19:33:57,477 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:57,477 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/A, priority=13, startTime=1731440037432; duration=0sec 2024-11-12T19:33:57,477 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:57,477 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:A 2024-11-12T19:33:57,477 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:57,479 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:57,479 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/C is initiating minor compaction (all files) 2024-11-12T19:33:57,479 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/C in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:57,479 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/7a81437737964b48976615c5764470da, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/a64fe4fffad046a09a5e4c05a8e99ef8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/18c0239373d04bb9bad0970a06ecbd91] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=36.5 K 2024-11-12T19:33:57,479 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a81437737964b48976615c5764470da, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1731440034445 2024-11-12T19:33:57,480 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting a64fe4fffad046a09a5e4c05a8e99ef8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1731440035754 2024-11-12T19:33:57,480 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18c0239373d04bb9bad0970a06ecbd91, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1731440035850 2024-11-12T19:33:57,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43501 is added to blk_1073742097_1273 (size=12983) 2024-11-12T19:33:57,492 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/912c9489745348eea6570b5708f07e37 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/912c9489745348eea6570b5708f07e37 2024-11-12T19:33:57,496 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#C#compaction#227 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:57,497 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/b1a1a008106c43548d20d988ae355ffe is 50, key is test_row_0/C:col10/1731440036480/Put/seqid=0 2024-11-12T19:33:57,498 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/B of 2056d7413c228b8ad5515802b19e3905 into 912c9489745348eea6570b5708f07e37(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:57,498 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:57,498 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/B, priority=13, startTime=1731440037433; duration=0sec 2024-11-12T19:33:57,498 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:57,498 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:B 2024-11-12T19:33:57,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742098_1274 (size=12983) 2024-11-12T19:33:57,583 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:57,583 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-12T19:33:57,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:33:57,584 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-12T19:33:57,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:33:57,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:57,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:33:57,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:57,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:33:57,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:57,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/38fa1e220ae94634bcaba5e694112689 is 50, key is test_row_0/A:col10/1731440036492/Put/seqid=0 2024-11-12T19:33:57,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742099_1275 (size=12301) 2024-11-12T19:33:57,598 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/38fa1e220ae94634bcaba5e694112689 2024-11-12T19:33:57,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/4b2d38bec7234cf3904f0d9fd0ccd3b0 is 50, key is test_row_0/B:col10/1731440036492/Put/seqid=0 2024-11-12T19:33:57,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742100_1276 (size=12301) 2024-11-12T19:33:57,612 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=304 (bloomFilter=true), 
to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/4b2d38bec7234cf3904f0d9fd0ccd3b0 2024-11-12T19:33:57,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/b43dc3f886b34a4eb48e3367ab2486b0 is 50, key is test_row_0/C:col10/1731440036492/Put/seqid=0 2024-11-12T19:33:57,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742101_1277 (size=12301) 2024-11-12T19:33:57,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:57,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:57,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-12T19:33:57,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:57,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440097657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:57,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:57,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440097659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:57,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:57,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440097660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:57,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:57,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440097761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:57,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:57,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440097762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:57,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:57,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440097763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:57,909 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/b1a1a008106c43548d20d988ae355ffe as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b1a1a008106c43548d20d988ae355ffe 2024-11-12T19:33:57,915 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/C of 2056d7413c228b8ad5515802b19e3905 into b1a1a008106c43548d20d988ae355ffe(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:57,915 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:57,915 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/C, priority=13, startTime=1731440037433; duration=0sec 2024-11-12T19:33:57,915 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:57,915 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:C 2024-11-12T19:33:57,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:57,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440097964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:57,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:57,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440097965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:57,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:57,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440097966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:58,026 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/b43dc3f886b34a4eb48e3367ab2486b0 2024-11-12T19:33:58,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/38fa1e220ae94634bcaba5e694112689 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/38fa1e220ae94634bcaba5e694112689 2024-11-12T19:33:58,040 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/38fa1e220ae94634bcaba5e694112689, entries=150, sequenceid=304, filesize=12.0 K 2024-11-12T19:33:58,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/4b2d38bec7234cf3904f0d9fd0ccd3b0 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/4b2d38bec7234cf3904f0d9fd0ccd3b0 2024-11-12T19:33:58,045 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/4b2d38bec7234cf3904f0d9fd0ccd3b0, entries=150, sequenceid=304, filesize=12.0 K 2024-11-12T19:33:58,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/b43dc3f886b34a4eb48e3367ab2486b0 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b43dc3f886b34a4eb48e3367ab2486b0 2024-11-12T19:33:58,054 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b43dc3f886b34a4eb48e3367ab2486b0, entries=150, sequenceid=304, filesize=12.0 K 2024-11-12T19:33:58,055 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 2056d7413c228b8ad5515802b19e3905 in 472ms, sequenceid=304, compaction requested=false 2024-11-12T19:33:58,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:58,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
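
The RegionTooBusyException entries above all cite the same blocking threshold (Over memstore limit=512.0 K). In stock HBase that per-region blocking size is derived from the configured flush size times the block multiplier, and writers are expected to back off until a flush like the one that just completed at sequenceid=304 drains the memstore. The sketch below is a minimal illustration of both points, assuming only the public client API and the standard keys hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the values this test run actually uses are not visible in the log, so the 128 K flush size mentioned in the comment is only a guess that would give the observed 512 K limit with the default multiplier of 4.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstoreBackpressureSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Per-region blocking size that triggers RegionTooBusyException on writes:
    // flush size multiplied by the block multiplier (library defaults used as fallbacks;
    // a 128 K flush size with multiplier 4 would match the 512 K limit seen in this log).
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long blockMultiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking memstore size = " + flushSize * blockMultiplier + " bytes");

    // Hand-rolled backoff for illustration only; the HBase client's own retry machinery
    // normally handles RegionTooBusyException, and may surface it wrapped in a
    // retries-exhausted exception rather than directly as caught here.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(100L << attempt); // wait for the region's flush to catch up
        }
      }
    }
  }
}
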
2024-11-12T19:33:58,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-12T19:33:58,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-12T19:33:58,058 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-12T19:33:58,058 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5280 sec 2024-11-12T19:33:58,059 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 2.5370 sec 2024-11-12T19:33:58,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:58,270 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-12T19:33:58,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:33:58,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:58,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:33:58,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:58,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:33:58,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:58,279 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/e476abf9ce444703b0fc29a810499ae4 is 50, key is test_row_0/A:col10/1731440038270/Put/seqid=0 2024-11-12T19:33:58,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:58,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440098276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:58,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:58,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440098279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:58,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:58,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440098280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:58,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742102_1278 (size=12301) 2024-11-12T19:33:58,290 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/e476abf9ce444703b0fc29a810499ae4 2024-11-12T19:33:58,322 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/460eefb08489407d92372a58b772116e is 50, key is test_row_0/B:col10/1731440038270/Put/seqid=0 2024-11-12T19:33:58,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742103_1279 (size=12301) 2024-11-12T19:33:58,359 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/460eefb08489407d92372a58b772116e 2024-11-12T19:33:58,375 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/b0bca428b3764514abba55d48aad2d1e is 50, key is test_row_0/C:col10/1731440038270/Put/seqid=0 2024-11-12T19:33:58,383 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:58,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440098381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:58,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:58,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440098383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:58,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:58,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440098383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:58,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742104_1280 (size=12301) 2024-11-12T19:33:58,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:58,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440098584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:58,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:58,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440098588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:58,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:58,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440098598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:58,798 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/b0bca428b3764514abba55d48aad2d1e 2024-11-12T19:33:58,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/e476abf9ce444703b0fc29a810499ae4 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/e476abf9ce444703b0fc29a810499ae4 2024-11-12T19:33:58,821 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/e476abf9ce444703b0fc29a810499ae4, entries=150, sequenceid=334, filesize=12.0 K 2024-11-12T19:33:58,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/460eefb08489407d92372a58b772116e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/460eefb08489407d92372a58b772116e 2024-11-12T19:33:58,828 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/460eefb08489407d92372a58b772116e, entries=150, sequenceid=334, filesize=12.0 K 2024-11-12T19:33:58,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/b0bca428b3764514abba55d48aad2d1e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b0bca428b3764514abba55d48aad2d1e 2024-11-12T19:33:58,835 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b0bca428b3764514abba55d48aad2d1e, entries=150, sequenceid=334, filesize=12.0 K 2024-11-12T19:33:58,836 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 2056d7413c228b8ad5515802b19e3905 in 566ms, sequenceid=334, compaction requested=true 2024-11-12T19:33:58,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:58,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:33:58,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:58,836 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:58,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:33:58,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:58,836 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:58,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:33:58,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:58,838 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:58,838 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:58,838 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/B is initiating minor compaction (all files) 2024-11-12T19:33:58,838 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/A is initiating minor compaction (all files) 2024-11-12T19:33:58,838 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/B in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:58,838 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/912c9489745348eea6570b5708f07e37, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/4b2d38bec7234cf3904f0d9fd0ccd3b0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/460eefb08489407d92372a58b772116e] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=36.7 K 2024-11-12T19:33:58,839 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/A in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
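
As a quick sanity check on the size bookkeeping in these compaction records (the log prints sizes in KiB with one decimal), the 37585 bytes selected above corresponds to the reported 36.7 K, the two 12301-byte flush outputs to 12.0 K each, and the 13085-byte compacted files registered a few records later to 12.8 K. The throwaway snippet below reproduces that arithmetic; the 12983-byte figure for the oldest input file is derived (total minus the two known flush files), not read directly from the log.

// Size arithmetic taken from the surrounding compaction and block-report records.
public class CompactionSizeCheck {
  public static void main(String[] args) {
    long[] selected = {12983, 12301, 12301}; // the three store files picked for compaction
    long total = 0;
    for (long bytes : selected) {
      total += bytes;
      System.out.printf("input: %d bytes = %.1f K%n", bytes, bytes / 1024.0);
    }
    System.out.printf("selected total: %d bytes = %.1f K%n", total, total / 1024.0);     // 36.7 K
    System.out.printf("compacted output: %d bytes = %.1f K%n", 13085L, 13085 / 1024.0);  // 12.8 K
  }
}
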
2024-11-12T19:33:58,839 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/5f5d9763adee416ab41b0e549714f351, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/38fa1e220ae94634bcaba5e694112689, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/e476abf9ce444703b0fc29a810499ae4] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=36.7 K 2024-11-12T19:33:58,839 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 912c9489745348eea6570b5708f07e37, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1731440035850 2024-11-12T19:33:58,839 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f5d9763adee416ab41b0e549714f351, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1731440035850 2024-11-12T19:33:58,840 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b2d38bec7234cf3904f0d9fd0ccd3b0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1731440036492 2024-11-12T19:33:58,840 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 460eefb08489407d92372a58b772116e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1731440037659 2024-11-12T19:33:58,841 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38fa1e220ae94634bcaba5e694112689, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1731440036492 2024-11-12T19:33:58,842 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting e476abf9ce444703b0fc29a810499ae4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1731440037659 2024-11-12T19:33:58,854 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#B#compaction#234 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:58,855 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/4c26c42db17c47a885d4b371c7fe9fab is 50, key is test_row_0/B:col10/1731440038270/Put/seqid=0 2024-11-12T19:33:58,860 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#A#compaction#235 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:58,861 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/2407d4e5a236466aa4acb78d5efec81b is 50, key is test_row_0/A:col10/1731440038270/Put/seqid=0 2024-11-12T19:33:58,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742105_1281 (size=13085) 2024-11-12T19:33:58,899 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/4c26c42db17c47a885d4b371c7fe9fab as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/4c26c42db17c47a885d4b371c7fe9fab 2024-11-12T19:33:58,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742106_1282 (size=13085) 2024-11-12T19:33:58,911 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/B of 2056d7413c228b8ad5515802b19e3905 into 4c26c42db17c47a885d4b371c7fe9fab(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:58,911 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:58,911 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/B, priority=13, startTime=1731440038836; duration=0sec 2024-11-12T19:33:58,913 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:33:58,913 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:B 2024-11-12T19:33:58,914 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:33:58,915 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-12T19:33:58,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:33:58,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:58,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:33:58,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:58,915 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:33:58,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:58,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:58,916 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:33:58,916 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/C is initiating minor compaction (all files) 2024-11-12T19:33:58,917 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/2407d4e5a236466aa4acb78d5efec81b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/2407d4e5a236466aa4acb78d5efec81b 2024-11-12T19:33:58,921 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/C in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:58,921 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b1a1a008106c43548d20d988ae355ffe, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b43dc3f886b34a4eb48e3367ab2486b0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b0bca428b3764514abba55d48aad2d1e] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=36.7 K 2024-11-12T19:33:58,922 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting b1a1a008106c43548d20d988ae355ffe, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1731440035850 2024-11-12T19:33:58,922 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting b43dc3f886b34a4eb48e3367ab2486b0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1731440036492 2024-11-12T19:33:58,922 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting b0bca428b3764514abba55d48aad2d1e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1731440037659 2024-11-12T19:33:58,924 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/A of 2056d7413c228b8ad5515802b19e3905 into 2407d4e5a236466aa4acb78d5efec81b(size=12.8 K), total size for 
store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:33:58,924 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:58,924 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/A, priority=13, startTime=1731440038836; duration=0sec 2024-11-12T19:33:58,925 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:58,925 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:A 2024-11-12T19:33:58,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/46b1e8c3355b4f9b8bfe649b22b2a328 is 50, key is test_row_0/A:col10/1731440038908/Put/seqid=0 2024-11-12T19:33:58,938 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#C#compaction#237 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:33:58,940 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/9c3a18f25e824e27b711d2bc4c623171 is 50, key is test_row_0/C:col10/1731440038270/Put/seqid=0 2024-11-12T19:33:58,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742108_1284 (size=13085) 2024-11-12T19:33:58,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742107_1283 (size=12301) 2024-11-12T19:33:59,007 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:59,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440099005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:59,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:59,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440099006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:59,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:59,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440099008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:59,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:59,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440099109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:59,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:59,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440099110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:59,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:59,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440099111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:59,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:59,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440099318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:59,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:59,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440099323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:59,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:59,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440099323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:59,375 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=348 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/46b1e8c3355b4f9b8bfe649b22b2a328 2024-11-12T19:33:59,380 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/9c3a18f25e824e27b711d2bc4c623171 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/9c3a18f25e824e27b711d2bc4c623171 2024-11-12T19:33:59,404 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/747e0431fbc4411c9a6c4768184ee49d is 50, key is test_row_0/B:col10/1731440038908/Put/seqid=0 2024-11-12T19:33:59,407 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/C of 2056d7413c228b8ad5515802b19e3905 into 9c3a18f25e824e27b711d2bc4c623171(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
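
[Editorial note, not part of the captured log] The repeated RegionTooBusyException warnings above are memstore backpressure: HRegion.checkResources rejects writes once the region's memstore data size passes its blocking limit, which HBase derives from the per-region flush size times the block multiplier. As a rough illustration only (the exact test settings are not shown in this log; the property values below are assumptions chosen solely so the product comes out to the logged 512.0 K), the relationship can be seen with the standard configuration keys:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Assumed values, picked only so that flushSize * multiplier == 512 K,
        // matching the "Over memstore limit=512.0 K" messages in the log.
        // (The production defaults are 128 MB flush size and a multiplier of 4.)
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024L * 1024L);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);

        // HRegion.checkResources throws RegionTooBusyException once the region's
        // memstore data size exceeds this blocking threshold.
        long blockingLimit = (long) multiplier * flushSize;
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes"); // 524288 = 512 K
      }
    }

When the limit is hit, the client does not fail outright: the built-in RpcRetryingCallerImpl backs off and retries, which is the "tries=7, retries=16" pattern visible a few entries further down.
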
2024-11-12T19:33:59,407 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:59,408 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/C, priority=13, startTime=1731440038836; duration=0sec 2024-11-12T19:33:59,408 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:33:59,408 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:C 2024-11-12T19:33:59,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742109_1285 (size=12301) 2024-11-12T19:33:59,441 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=348 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/747e0431fbc4411c9a6c4768184ee49d 2024-11-12T19:33:59,468 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/6693fc26369045b78754f64b218b67b3 is 50, key is test_row_0/C:col10/1731440038908/Put/seqid=0 2024-11-12T19:33:59,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:59,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43400 deadline: 1731440099478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:59,480 DEBUG [Thread-999 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8189 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., hostname=81d69e608036,33067,1731439956493, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:33:59,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742110_1286 (size=12301) 2024-11-12T19:33:59,509 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=348 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/6693fc26369045b78754f64b218b67b3 2024-11-12T19:33:59,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/46b1e8c3355b4f9b8bfe649b22b2a328 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/46b1e8c3355b4f9b8bfe649b22b2a328 2024-11-12T19:33:59,546 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/46b1e8c3355b4f9b8bfe649b22b2a328, entries=150, sequenceid=348, filesize=12.0 K 2024-11-12T19:33:59,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/747e0431fbc4411c9a6c4768184ee49d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/747e0431fbc4411c9a6c4768184ee49d 2024-11-12T19:33:59,580 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/747e0431fbc4411c9a6c4768184ee49d, entries=150, sequenceid=348, filesize=12.0 K 2024-11-12T19:33:59,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/6693fc26369045b78754f64b218b67b3 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/6693fc26369045b78754f64b218b67b3 2024-11-12T19:33:59,629 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/6693fc26369045b78754f64b218b67b3, entries=150, sequenceid=348, filesize=12.0 K 2024-11-12T19:33:59,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for 2056d7413c228b8ad5515802b19e3905 in 717ms, sequenceid=348, compaction requested=false 2024-11-12T19:33:59,633 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:33:59,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:33:59,645 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-12T19:33:59,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:33:59,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:59,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:33:59,650 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:59,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:33:59,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:33:59,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-12T19:33:59,651 INFO [Thread-1007 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-12T19:33:59,671 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:33:59,673 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/0ea8464259764de28f70d34b56767d5e is 50, key is test_row_0/A:col10/1731440039632/Put/seqid=0 2024-11-12T19:33:59,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-12T19:33:59,678 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:33:59,679 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:33:59,679 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:33:59,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-12T19:33:59,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:59,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440099678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:59,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:59,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440099681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:59,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:59,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440099681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:59,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742111_1287 (size=12301) 2024-11-12T19:33:59,733 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=378 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/0ea8464259764de28f70d34b56767d5e 2024-11-12T19:33:59,767 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/e9778361edaf4ab5b6db4ab328775883 is 50, key is test_row_0/B:col10/1731440039632/Put/seqid=0 2024-11-12T19:33:59,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-12T19:33:59,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:59,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440099784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:59,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440099784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:59,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:33:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440099784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:33:59,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742112_1288 (size=12301) 2024-11-12T19:33:59,805 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=378 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/e9778361edaf4ab5b6db4ab328775883 2024-11-12T19:33:59,831 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:59,835 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-12T19:33:59,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:59,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:59,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:59,835 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:59,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:59,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:59,862 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/f70050de1d7e491fb0ba6b2cce73a62a is 50, key is test_row_0/C:col10/1731440039632/Put/seqid=0 2024-11-12T19:33:59,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742113_1289 (size=12301) 2024-11-12T19:33:59,919 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=378 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/f70050de1d7e491fb0ba6b2cce73a62a 2024-11-12T19:33:59,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/0ea8464259764de28f70d34b56767d5e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/0ea8464259764de28f70d34b56767d5e 2024-11-12T19:33:59,974 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/0ea8464259764de28f70d34b56767d5e, entries=150, sequenceid=378, filesize=12.0 K 2024-11-12T19:33:59,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/e9778361edaf4ab5b6db4ab328775883 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/e9778361edaf4ab5b6db4ab328775883 2024-11-12T19:33:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-12T19:33:59,988 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:33:59,988 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-12T19:33:59,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:59,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:33:59,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:33:59,989 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:59,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:33:59,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:00,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:00,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440099996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:00,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:00,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440099996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:00,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:00,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440099997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:00,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/e9778361edaf4ab5b6db4ab328775883, entries=150, sequenceid=378, filesize=12.0 K 2024-11-12T19:34:00,025 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/f70050de1d7e491fb0ba6b2cce73a62a as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/f70050de1d7e491fb0ba6b2cce73a62a 2024-11-12T19:34:00,039 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/f70050de1d7e491fb0ba6b2cce73a62a, entries=150, sequenceid=378, filesize=12.0 K 2024-11-12T19:34:00,040 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 2056d7413c228b8ad5515802b19e3905 in 396ms, sequenceid=378, compaction requested=true 2024-11-12T19:34:00,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:00,040 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:00,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:34:00,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:00,040 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:00,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:B, priority=-2147483648, current under compaction store size is 2 
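The RegionTooBusyException entries above show client mutations being rejected while the region's memstore sits over its 512.0 K blocking limit; the writes succeed again once the in-flight flush drains the memstore. Below is a minimal client-side sketch of reacting to that rejection with explicit backoff. It assumes the table, row, family and qualifier names taken from the log, and arbitrary retry counts and sleep times; the stock HBase client already retries this exception internally, so the loop is purely illustrative.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row, family and qualifier taken from the log ("test_row_0/A:col10"); the value is arbitrary.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);            // rejected while the memstore is over the blocking limit
          break;
        } catch (IOException e) {
          // The exception may surface directly or wrapped once the client's own retries are exhausted.
          boolean busy = e instanceof RegionTooBusyException
              || e.getCause() instanceof RegionTooBusyException;
          if (!busy || attempt >= 5) {
            throw e;                 // not a transient "too busy" condition, or out of attempts
          }
          Thread.sleep(backoffMs);   // give the in-flight flush time to drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}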
2024-11-12T19:34:00,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:00,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:34:00,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:00,045 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:00,045 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/B is initiating minor compaction (all files) 2024-11-12T19:34:00,046 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/B in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:00,046 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/4c26c42db17c47a885d4b371c7fe9fab, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/747e0431fbc4411c9a6c4768184ee49d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/e9778361edaf4ab5b6db4ab328775883] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=36.8 K 2024-11-12T19:34:00,049 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:00,049 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/A is initiating minor compaction (all files) 2024-11-12T19:34:00,049 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/A in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
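The SortedCompactionPolicy line above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") is consistent with the stock thresholds: a minor compaction is considered once at least hbase.hstore.compaction.min files are eligible, and hbase.hstore.blockingStoreFiles is the per-store file count at which further flushes are blocked (defaults 3 and 16 in HBase 2.x). The sketch below only shows those assumed defaults being set explicitly; they are not values confirmed from this test's site configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is selected
    // (the "3 eligible" in the log matches the stock default of 3).
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on the number of files merged in a single compaction.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Per-store file count at which new flushes are blocked ("16 blocking" in the log).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}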
2024-11-12T19:34:00,049 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/2407d4e5a236466aa4acb78d5efec81b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/46b1e8c3355b4f9b8bfe649b22b2a328, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/0ea8464259764de28f70d34b56767d5e] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=36.8 K 2024-11-12T19:34:00,050 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c26c42db17c47a885d4b371c7fe9fab, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1731440037659 2024-11-12T19:34:00,051 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2407d4e5a236466aa4acb78d5efec81b, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1731440037659 2024-11-12T19:34:00,052 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 747e0431fbc4411c9a6c4768184ee49d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1731440038892 2024-11-12T19:34:00,052 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46b1e8c3355b4f9b8bfe649b22b2a328, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1731440038892 2024-11-12T19:34:00,052 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting e9778361edaf4ab5b6db4ab328775883, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=378, earliestPutTs=1731440039631 2024-11-12T19:34:00,057 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ea8464259764de28f70d34b56767d5e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=378, earliestPutTs=1731440039631 2024-11-12T19:34:00,122 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#A#compaction#243 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:00,123 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/1cbd3c2b192f4464a58275ab62967011 is 50, key is test_row_0/A:col10/1731440039632/Put/seqid=0 2024-11-12T19:34:00,128 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#B#compaction#244 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:00,128 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/1d14764f193d4b159ba3324dc13a3134 is 50, key is test_row_0/B:col10/1731440039632/Put/seqid=0 2024-11-12T19:34:00,145 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:00,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-12T19:34:00,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:00,149 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-12T19:34:00,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:34:00,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:00,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:34:00,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:00,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:34:00,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:00,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/da2d3d914c1a439f8b35a74a540c03b2 is 50, key is test_row_0/A:col10/1731440039670/Put/seqid=0 2024-11-12T19:34:00,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742115_1291 (size=13187) 2024-11-12T19:34:00,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742114_1290 (size=13187) 2024-11-12T19:34:00,207 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/1d14764f193d4b159ba3324dc13a3134 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/1d14764f193d4b159ba3324dc13a3134 2024-11-12T19:34:00,208 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/1cbd3c2b192f4464a58275ab62967011 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/1cbd3c2b192f4464a58275ab62967011 2024-11-12T19:34:00,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742116_1292 (size=12301) 2024-11-12T19:34:00,234 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/da2d3d914c1a439f8b35a74a540c03b2 2024-11-12T19:34:00,237 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/B of 2056d7413c228b8ad5515802b19e3905 into 1d14764f193d4b159ba3324dc13a3134(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:34:00,237 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:00,238 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/B, priority=13, startTime=1731440040040; duration=0sec 2024-11-12T19:34:00,238 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:00,238 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:B 2024-11-12T19:34:00,238 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:00,246 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/A of 2056d7413c228b8ad5515802b19e3905 into 1cbd3c2b192f4464a58275ab62967011(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
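A quick check that the selector's figures agree: the three inputs listed by the Compactor(224) lines (12.8 K + 12.0 K + 12.0 K) sum to the 36.8 K / 37,687-byte selection reported above, while the compacted output is only 12.9 K, likely because each input covers the same 150 rows and superseded cells are dropped during the merge. The arithmetic, for reference:

public class CompactionSizeCheck {
  public static void main(String[] args) {
    long selectedBytes = 37_687L;                                      // "selected 3 files of size 37687"
    System.out.printf("selected = %.1f K%n", selectedBytes / 1024.0);  // 36.8 K, matches totalSize=36.8 K
    double inputsK = 12.8 + 12.0 + 12.0;                               // per-file sizes from the Compactor lines
    System.out.printf("inputs   = %.1f K%n", inputsK);                 // 36.8 K
  }
}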
2024-11-12T19:34:00,246 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:00,246 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/A, priority=13, startTime=1731440040040; duration=0sec 2024-11-12T19:34:00,246 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:00,246 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:A 2024-11-12T19:34:00,247 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:00,247 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/C is initiating minor compaction (all files) 2024-11-12T19:34:00,247 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/C in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:00,247 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/9c3a18f25e824e27b711d2bc4c623171, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/6693fc26369045b78754f64b218b67b3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/f70050de1d7e491fb0ba6b2cce73a62a] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=36.8 K 2024-11-12T19:34:00,252 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c3a18f25e824e27b711d2bc4c623171, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1731440037659 2024-11-12T19:34:00,255 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 6693fc26369045b78754f64b218b67b3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1731440038892 2024-11-12T19:34:00,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/8c23d4fe93614b028e07470f42bdbaf3 is 50, key is test_row_0/B:col10/1731440039670/Put/seqid=0 2024-11-12T19:34:00,260 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting f70050de1d7e491fb0ba6b2cce73a62a, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=378, earliestPutTs=1731440039631 2024-11-12T19:34:00,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-12T19:34:00,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742117_1293 (size=12301) 2024-11-12T19:34:00,301 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/8c23d4fe93614b028e07470f42bdbaf3 2024-11-12T19:34:00,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:34:00,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:34:00,309 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#C#compaction#247 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:00,309 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/09ab7e2c4874484782011536f0d62206 is 50, key is test_row_0/C:col10/1731440039632/Put/seqid=0 2024-11-12T19:34:00,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/d9bb813668544ebabab62435e14f4996 is 50, key is test_row_0/C:col10/1731440039670/Put/seqid=0 2024-11-12T19:34:00,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742118_1294 (size=13187) 2024-11-12T19:34:00,365 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/09ab7e2c4874484782011536f0d62206 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/09ab7e2c4874484782011536f0d62206 2024-11-12T19:34:00,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:00,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440100362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:00,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:00,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440100362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:00,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:00,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440100364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:00,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742119_1295 (size=12301) 2024-11-12T19:34:00,370 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/d9bb813668544ebabab62435e14f4996 2024-11-12T19:34:00,385 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/C of 2056d7413c228b8ad5515802b19e3905 into 09ab7e2c4874484782011536f0d62206(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
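The PressureAwareThroughputController lines above report each compaction's average throughput against a "total limit" of 50.00 MB/second; with no compaction pressure, that limit is governed by the compaction throughput bounds. The sketch below uses the property names and defaults believed current for HBase 2.x; the exact keys and values vary by release, so treat them as assumptions rather than this test's actual settings.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed 2.x property names for the pressure-aware compaction throttle; the 50 MB/s
    // "total limit" in the log matches the assumed lower bound when pressure is zero.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    System.out.println(conf.get("hbase.hstore.compaction.throughput.lower.bound"));
  }
}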
2024-11-12T19:34:00,385 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:00,385 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/C, priority=13, startTime=1731440040041; duration=0sec 2024-11-12T19:34:00,385 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:00,385 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:C 2024-11-12T19:34:00,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/da2d3d914c1a439f8b35a74a540c03b2 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/da2d3d914c1a439f8b35a74a540c03b2 2024-11-12T19:34:00,391 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/da2d3d914c1a439f8b35a74a540c03b2, entries=150, sequenceid=387, filesize=12.0 K 2024-11-12T19:34:00,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/8c23d4fe93614b028e07470f42bdbaf3 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/8c23d4fe93614b028e07470f42bdbaf3 2024-11-12T19:34:00,395 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/8c23d4fe93614b028e07470f42bdbaf3, entries=150, sequenceid=387, filesize=12.0 K 2024-11-12T19:34:00,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/d9bb813668544ebabab62435e14f4996 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/d9bb813668544ebabab62435e14f4996 2024-11-12T19:34:00,400 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/d9bb813668544ebabab62435e14f4996, entries=150, sequenceid=387, filesize=12.0 K 2024-11-12T19:34:00,401 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 2056d7413c228b8ad5515802b19e3905 in 252ms, sequenceid=387, compaction requested=false 2024-11-12T19:34:00,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:00,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:00,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-12T19:34:00,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-12T19:34:00,403 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-12T19:34:00,403 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 723 msec 2024-11-12T19:34:00,404 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 733 msec 2024-11-12T19:34:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:34:00,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-12T19:34:00,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:34:00,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:00,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:34:00,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:00,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:34:00,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:00,473 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/abeaf7100cac480fa3cd0c0b89754e0f is 50, key is test_row_0/A:col10/1731440040467/Put/seqid=0 2024-11-12T19:34:00,495 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742120_1296 (size=14741) 2024-11-12T19:34:00,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:00,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440100495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:00,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:00,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440100495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:00,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440100498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:00,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:00,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440100599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:00,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:00,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440100600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:00,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:00,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440100604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:00,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-12T19:34:00,784 INFO [Thread-1007 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-12T19:34:00,785 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:34:00,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-12T19:34:00,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-12T19:34:00,793 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:34:00,795 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:34:00,796 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:34:00,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:00,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440100803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:00,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:00,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440100806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:00,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:00,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440100806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:00,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-12T19:34:00,896 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=418 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/abeaf7100cac480fa3cd0c0b89754e0f 2024-11-12T19:34:00,903 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/f0a1dcffef80448886b432fd526a69fc is 50, key is test_row_0/B:col10/1731440040467/Put/seqid=0 2024-11-12T19:34:00,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742121_1297 (size=12301) 2024-11-12T19:34:00,947 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:00,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-12T19:34:00,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:00,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:34:00,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:34:00,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:00,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:00,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:01,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-12T19:34:01,107 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:01,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-12T19:34:01,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:01,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:34:01,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:01,107 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:01,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:01,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:01,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:01,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440101111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:01,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:01,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440101111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:01,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:01,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440101111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:01,260 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:01,261 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-12T19:34:01,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:01,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:34:01,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:01,261 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:01,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:01,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:01,308 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=418 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/f0a1dcffef80448886b432fd526a69fc 2024-11-12T19:34:01,326 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/7b01e669af324e50b9d0ceb8778a4655 is 50, key is test_row_0/C:col10/1731440040467/Put/seqid=0 2024-11-12T19:34:01,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742122_1298 (size=12301) 2024-11-12T19:34:01,347 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=418 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/7b01e669af324e50b9d0ceb8778a4655 2024-11-12T19:34:01,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/abeaf7100cac480fa3cd0c0b89754e0f as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/abeaf7100cac480fa3cd0c0b89754e0f 2024-11-12T19:34:01,369 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/abeaf7100cac480fa3cd0c0b89754e0f, entries=200, sequenceid=418, filesize=14.4 K 2024-11-12T19:34:01,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/f0a1dcffef80448886b432fd526a69fc as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/f0a1dcffef80448886b432fd526a69fc 2024-11-12T19:34:01,379 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/f0a1dcffef80448886b432fd526a69fc, entries=150, sequenceid=418, filesize=12.0 K 
2024-11-12T19:34:01,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/7b01e669af324e50b9d0ceb8778a4655 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/7b01e669af324e50b9d0ceb8778a4655 2024-11-12T19:34:01,385 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/7b01e669af324e50b9d0ceb8778a4655, entries=150, sequenceid=418, filesize=12.0 K 2024-11-12T19:34:01,386 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 2056d7413c228b8ad5515802b19e3905 in 917ms, sequenceid=418, compaction requested=true 2024-11-12T19:34:01,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:01,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:34:01,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:01,386 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:01,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:34:01,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:01,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:34:01,386 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:01,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:01,387 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40229 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:01,387 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:01,387 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 
2056d7413c228b8ad5515802b19e3905/A is initiating minor compaction (all files) 2024-11-12T19:34:01,387 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/B is initiating minor compaction (all files) 2024-11-12T19:34:01,387 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/A in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:01,387 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/B in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:01,387 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/1cbd3c2b192f4464a58275ab62967011, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/da2d3d914c1a439f8b35a74a540c03b2, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/abeaf7100cac480fa3cd0c0b89754e0f] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=39.3 K 2024-11-12T19:34:01,387 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/1d14764f193d4b159ba3324dc13a3134, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/8c23d4fe93614b028e07470f42bdbaf3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/f0a1dcffef80448886b432fd526a69fc] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=36.9 K 2024-11-12T19:34:01,388 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1cbd3c2b192f4464a58275ab62967011, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=378, earliestPutTs=1731440039631 2024-11-12T19:34:01,388 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d14764f193d4b159ba3324dc13a3134, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=378, earliestPutTs=1731440039631 2024-11-12T19:34:01,388 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting da2d3d914c1a439f8b35a74a540c03b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1731440039653 2024-11-12T19:34:01,388 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c23d4fe93614b028e07470f42bdbaf3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1731440039653 2024-11-12T19:34:01,388 DEBUG 
[RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting abeaf7100cac480fa3cd0c0b89754e0f, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=418, earliestPutTs=1731440040356 2024-11-12T19:34:01,388 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting f0a1dcffef80448886b432fd526a69fc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=418, earliestPutTs=1731440040358 2024-11-12T19:34:01,395 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#A#compaction#252 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:01,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-12T19:34:01,396 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/325979bc74a741d384baeb8dd4a06c97 is 50, key is test_row_0/A:col10/1731440040467/Put/seqid=0 2024-11-12T19:34:01,398 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#B#compaction#253 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:01,398 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/d07a18b5cb424574b796ab6650dddbdc is 50, key is test_row_0/B:col10/1731440040467/Put/seqid=0 2024-11-12T19:34:01,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742124_1300 (size=13289) 2024-11-12T19:34:01,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742123_1299 (size=13289) 2024-11-12T19:34:01,417 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:01,419 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-12T19:34:01,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:34:01,419 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-12T19:34:01,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:34:01,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:01,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:34:01,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:01,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:34:01,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:01,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/2b63c346b78846bea19675acec4eb29f is 50, key is test_row_0/A:col10/1731440040492/Put/seqid=0 2024-11-12T19:34:01,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742125_1301 (size=12301) 2024-11-12T19:34:01,621 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:34:01,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:34:01,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:01,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440101658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:01,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:01,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440101658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:01,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:01,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440101661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:01,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:01,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440101762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:01,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:01,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440101763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:01,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:01,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440101767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:01,813 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/d07a18b5cb424574b796ab6650dddbdc as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/d07a18b5cb424574b796ab6650dddbdc 2024-11-12T19:34:01,829 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/325979bc74a741d384baeb8dd4a06c97 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/325979bc74a741d384baeb8dd4a06c97 2024-11-12T19:34:01,835 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/B of 2056d7413c228b8ad5515802b19e3905 into d07a18b5cb424574b796ab6650dddbdc(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
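The repeated RegionTooBusyException WARN/DEBUG entries above are the server refusing Mutate calls while the region's memstore is over its blocking limit; the standard HBase client normally retries these internally, but a minimal caller-side sketch of the same idea is shown below. It uses the standard HBase 2.x client API; the table, row, and column names are taken from the test, while the backoff values and retry count are made up for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/qualifier follow the test's naming; the value is arbitrary.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      long backoffMs = 100;                      // made-up starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);                        // server may throw RegionTooBusyException
          return;                                // write accepted
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);               // wait for the region to flush, then retry
          backoffMs *= 2;
        }
      }
      throw new IllegalStateException("region stayed over its memstore limit");
    }
  }
}

Backing off matters here because the region only accepts writes again once the in-flight flush, visible later in the log, brings the memstore back under the limit.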
2024-11-12T19:34:01,835 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:01,835 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/B, priority=13, startTime=1731440041386; duration=0sec 2024-11-12T19:34:01,835 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:01,835 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:B 2024-11-12T19:34:01,835 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:01,836 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=427 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/2b63c346b78846bea19675acec4eb29f 2024-11-12T19:34:01,838 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:01,838 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/C is initiating minor compaction (all files) 2024-11-12T19:34:01,838 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/C in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:34:01,838 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/09ab7e2c4874484782011536f0d62206, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/d9bb813668544ebabab62435e14f4996, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/7b01e669af324e50b9d0ceb8778a4655] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=36.9 K 2024-11-12T19:34:01,839 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 09ab7e2c4874484782011536f0d62206, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=378, earliestPutTs=1731440039631 2024-11-12T19:34:01,840 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting d9bb813668544ebabab62435e14f4996, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1731440039653 2024-11-12T19:34:01,841 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b01e669af324e50b9d0ceb8778a4655, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=418, earliestPutTs=1731440040358 2024-11-12T19:34:01,845 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/A of 2056d7413c228b8ad5515802b19e3905 into 325979bc74a741d384baeb8dd4a06c97(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:34:01,845 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:01,846 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/A, priority=13, startTime=1731440041386; duration=0sec 2024-11-12T19:34:01,846 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:01,846 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:A 2024-11-12T19:34:01,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/7909536534ec4774a6920d4ad7ebd834 is 50, key is test_row_0/B:col10/1731440040492/Put/seqid=0 2024-11-12T19:34:01,887 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#C#compaction#256 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:01,888 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/09151b155b2d40839f06c46c361840d3 is 50, key is test_row_0/C:col10/1731440040467/Put/seqid=0 2024-11-12T19:34:01,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-12T19:34:01,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742126_1302 (size=12301) 2024-11-12T19:34:01,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742127_1303 (size=13289) 2024-11-12T19:34:01,933 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/09151b155b2d40839f06c46c361840d3 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/09151b155b2d40839f06c46c361840d3 2024-11-12T19:34:01,943 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/C of 2056d7413c228b8ad5515802b19e3905 into 09151b155b2d40839f06c46c361840d3(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
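The "Committing hdfs://.../.tmp/<family>/<file> as hdfs://.../<family>/<file>" entries above are the step where a finished flush or compaction output is moved out of the region's .tmp directory into the store (column-family) directory. A rough sketch of that move with the plain Hadoop FileSystem API follows; the paths are shortened and hypothetical, and HBase's own HRegionFileSystem adds validation and bookkeeping on top of a bare rename.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitStoreFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical, shortened paths standing in for the long test-data paths in the log.
    Path tmpFile = new Path("/hbase/data/default/TestAcidGuarantees/region1/.tmp/A/example-hfile");
    Path storeFile = new Path("/hbase/data/default/TestAcidGuarantees/region1/A/example-hfile");
    // Within a single HDFS namespace a rename is a metadata-only operation, so the
    // new HFile appears in the store directory atomically once the commit succeeds.
    if (!fs.rename(tmpFile, storeFile)) {
      throw new java.io.IOException("failed to commit " + tmpFile + " to " + storeFile);
    }
  }
}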
2024-11-12T19:34:01,943 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:01,944 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/C, priority=13, startTime=1731440041386; duration=0sec 2024-11-12T19:34:01,944 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:01,944 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:C 2024-11-12T19:34:01,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:01,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440101964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:01,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:01,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440101966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:01,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:01,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440101974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:02,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:02,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440102267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:02,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:02,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440102272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:02,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:02,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440102278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:02,304 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=427 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/7909536534ec4774a6920d4ad7ebd834 2024-11-12T19:34:02,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/1366d4eef3e444e0a83c45e07db7f78e is 50, key is test_row_0/C:col10/1731440040492/Put/seqid=0 2024-11-12T19:34:02,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742128_1304 (size=12301) 2024-11-12T19:34:02,318 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=427 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/1366d4eef3e444e0a83c45e07db7f78e 2024-11-12T19:34:02,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/2b63c346b78846bea19675acec4eb29f as 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/2b63c346b78846bea19675acec4eb29f 2024-11-12T19:34:02,338 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/2b63c346b78846bea19675acec4eb29f, entries=150, sequenceid=427, filesize=12.0 K 2024-11-12T19:34:02,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/7909536534ec4774a6920d4ad7ebd834 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/7909536534ec4774a6920d4ad7ebd834 2024-11-12T19:34:02,345 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/7909536534ec4774a6920d4ad7ebd834, entries=150, sequenceid=427, filesize=12.0 K 2024-11-12T19:34:02,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/1366d4eef3e444e0a83c45e07db7f78e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/1366d4eef3e444e0a83c45e07db7f78e 2024-11-12T19:34:02,351 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/1366d4eef3e444e0a83c45e07db7f78e, entries=150, sequenceid=427, filesize=12.0 K 2024-11-12T19:34:02,353 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 2056d7413c228b8ad5515802b19e3905 in 934ms, sequenceid=427, compaction requested=false 2024-11-12T19:34:02,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:02,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
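For context on the "Over memstore limit=512.0 K" rejections: HRegion.checkResources blocks updates once a region's memstore exceeds a blocking threshold derived from the configured flush size and a blocking multiplier. The values this test actually used are not shown in the log excerpt; the sketch below simply picks hypothetical numbers that multiply out to the 512 K limit reported above, using the standard configuration keys.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical values: 128 KB flush size x 4 multiplier = the 512 K blocking
    // limit reported in the log. The test's real settings are not in this excerpt.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    // Writes to the region are rejected with RegionTooBusyException while its
    // memstore is above this size, until a flush brings it back down.
    System.out.println("blocking limit = " + blockingLimit + " bytes");
  }
}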
2024-11-12T19:34:02,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-12T19:34:02,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-12T19:34:02,356 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-12T19:34:02,356 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5600 sec 2024-11-12T19:34:02,358 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.5720 sec 2024-11-12T19:34:02,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:34:02,778 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-12T19:34:02,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:34:02,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:02,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:34:02,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:02,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:34:02,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:02,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:02,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440102796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:02,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:02,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440102796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:02,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:02,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440102798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:02,804 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/6242dff7f5304703ba89659a23178475 is 50, key is test_row_0/A:col10/1731440041659/Put/seqid=0 2024-11-12T19:34:02,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742129_1305 (size=12301) 2024-11-12T19:34:02,851 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=458 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/6242dff7f5304703ba89659a23178475 2024-11-12T19:34:02,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-12T19:34:02,901 INFO [Thread-1007 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-12T19:34:02,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:02,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440102901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:02,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:02,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440102902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:02,910 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:34:02,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:02,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440102906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:02,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-12T19:34:02,912 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:34:02,913 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:34:02,913 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:34:02,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-12T19:34:02,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/9c7f8cd0b2b34bbd8be7d77643afcd11 is 50, key is test_row_0/B:col10/1731440041659/Put/seqid=0 2024-11-12T19:34:02,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742130_1306 (size=12301) 2024-11-12T19:34:02,956 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=458 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/9c7f8cd0b2b34bbd8be7d77643afcd11 2024-11-12T19:34:02,963 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/4d21671568fd416989d6c7bdfa26cd4b is 50, key is test_row_0/C:col10/1731440041659/Put/seqid=0 2024-11-12T19:34:02,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742131_1307 (size=12301) 2024-11-12T19:34:02,978 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=458 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/4d21671568fd416989d6c7bdfa26cd4b 2024-11-12T19:34:03,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/6242dff7f5304703ba89659a23178475 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/6242dff7f5304703ba89659a23178475 2024-11-12T19:34:03,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-12T19:34:03,015 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/6242dff7f5304703ba89659a23178475, entries=150, sequenceid=458, filesize=12.0 K 2024-11-12T19:34:03,017 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/9c7f8cd0b2b34bbd8be7d77643afcd11 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/9c7f8cd0b2b34bbd8be7d77643afcd11 2024-11-12T19:34:03,028 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/9c7f8cd0b2b34bbd8be7d77643afcd11, entries=150, sequenceid=458, filesize=12.0 K 2024-11-12T19:34:03,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/4d21671568fd416989d6c7bdfa26cd4b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4d21671568fd416989d6c7bdfa26cd4b 2024-11-12T19:34:03,039 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4d21671568fd416989d6c7bdfa26cd4b, entries=150, sequenceid=458, filesize=12.0 K 2024-11-12T19:34:03,040 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 
2056d7413c228b8ad5515802b19e3905 in 263ms, sequenceid=458, compaction requested=true 2024-11-12T19:34:03,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:03,040 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:03,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:34:03,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:03,041 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:03,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:34:03,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:03,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:34:03,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:03,046 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:03,046 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/B is initiating minor compaction (all files) 2024-11-12T19:34:03,046 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/B in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:34:03,046 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/d07a18b5cb424574b796ab6650dddbdc, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/7909536534ec4774a6920d4ad7ebd834, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/9c7f8cd0b2b34bbd8be7d77643afcd11] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=37.0 K 2024-11-12T19:34:03,046 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:03,046 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/A is initiating minor compaction (all files) 2024-11-12T19:34:03,046 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/A in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:03,047 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/325979bc74a741d384baeb8dd4a06c97, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/2b63c346b78846bea19675acec4eb29f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/6242dff7f5304703ba89659a23178475] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=37.0 K 2024-11-12T19:34:03,047 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting d07a18b5cb424574b796ab6650dddbdc, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=418, earliestPutTs=1731440040358 2024-11-12T19:34:03,047 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 325979bc74a741d384baeb8dd4a06c97, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=418, earliestPutTs=1731440040358 2024-11-12T19:34:03,048 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 7909536534ec4774a6920d4ad7ebd834, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1731440040473 2024-11-12T19:34:03,049 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b63c346b78846bea19675acec4eb29f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1731440040473 2024-11-12T19:34:03,049 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 9c7f8cd0b2b34bbd8be7d77643afcd11, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=458, earliestPutTs=1731440041657 2024-11-12T19:34:03,049 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6242dff7f5304703ba89659a23178475, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=458, earliestPutTs=1731440041657 2024-11-12T19:34:03,062 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#A#compaction#261 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:03,063 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/8a73c3750aff4dd6be12fcc2034e473f is 50, key is test_row_0/A:col10/1731440041659/Put/seqid=0 2024-11-12T19:34:03,065 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#B#compaction#262 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:03,066 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/d6a0566228114d87ad47972c7f5e0405 is 50, key is test_row_0/B:col10/1731440041659/Put/seqid=0 2024-11-12T19:34:03,067 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:03,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-12T19:34:03,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
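The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" lines above refer to ExploringCompactionPolicy's ratio test. A simplified sketch of that test is below; it is not the actual HBase code, and the 1.2 ratio is the documented default for hbase.hstore.compaction.ratio.

import java.util.List;

public class FilesInRatioSketch {
  // True when no single file in the candidate window is larger than the sum of
  // the other files multiplied by the compaction ratio.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Sizes on the order of the three ~12-13 K store files selected in the log above.
    System.out.println(filesInRatio(List.of(13_289L, 12_301L, 12_301L), 1.2)); // prints true
  }
}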
2024-11-12T19:34:03,069 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-12T19:34:03,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:34:03,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:03,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:34:03,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:03,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:34:03,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:03,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/90fb624ca5cd4e9d993f1e54740b5bea is 50, key is test_row_0/A:col10/1731440042791/Put/seqid=0 2024-11-12T19:34:03,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:34:03,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:34:03,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742133_1309 (size=13391) 2024-11-12T19:34:03,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742132_1308 (size=13391) 2024-11-12T19:34:03,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742134_1310 (size=9857) 2024-11-12T19:34:03,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:03,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440103194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:03,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:03,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440103197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:03,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:03,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440103197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:03,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-12T19:34:03,302 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:03,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440103302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:03,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:03,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440103303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:03,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:03,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440103306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:03,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:03,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43404 deadline: 1731440103372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:03,376 DEBUG [Thread-1005 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18302 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., hostname=81d69e608036,33067,1731439956493, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:34:03,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:03,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440103507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:03,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-12T19:34:03,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:03,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440103514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:03,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:03,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440103515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:03,533 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/d6a0566228114d87ad47972c7f5e0405 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/d6a0566228114d87ad47972c7f5e0405 2024-11-12T19:34:03,535 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/8a73c3750aff4dd6be12fcc2034e473f as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/8a73c3750aff4dd6be12fcc2034e473f 2024-11-12T19:34:03,543 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/B of 2056d7413c228b8ad5515802b19e3905 into d6a0566228114d87ad47972c7f5e0405(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:34:03,543 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:03,543 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/B, priority=13, startTime=1731440043040; duration=0sec 2024-11-12T19:34:03,544 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:03,544 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:B 2024-11-12T19:34:03,544 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:03,544 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/A of 2056d7413c228b8ad5515802b19e3905 into 8a73c3750aff4dd6be12fcc2034e473f(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:34:03,545 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:03,545 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/A, priority=13, startTime=1731440043040; duration=0sec 2024-11-12T19:34:03,545 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:03,545 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:A 2024-11-12T19:34:03,546 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:03,546 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/C is initiating minor compaction (all files) 2024-11-12T19:34:03,546 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/C in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:34:03,546 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/09151b155b2d40839f06c46c361840d3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/1366d4eef3e444e0a83c45e07db7f78e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4d21671568fd416989d6c7bdfa26cd4b] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=37.0 K 2024-11-12T19:34:03,547 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 09151b155b2d40839f06c46c361840d3, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=418, earliestPutTs=1731440040358 2024-11-12T19:34:03,547 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 1366d4eef3e444e0a83c45e07db7f78e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1731440040473 2024-11-12T19:34:03,548 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d21671568fd416989d6c7bdfa26cd4b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=458, earliestPutTs=1731440041657 2024-11-12T19:34:03,551 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/90fb624ca5cd4e9d993f1e54740b5bea 2024-11-12T19:34:03,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/dd83f93e0b9544f88ef215cfdde861e8 is 50, key is test_row_0/B:col10/1731440042791/Put/seqid=0 2024-11-12T19:34:03,589 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#C#compaction#265 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:03,590 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/4a699c7a156e4631bb66458740c9ed55 is 50, key is test_row_0/C:col10/1731440041659/Put/seqid=0 2024-11-12T19:34:03,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742135_1311 (size=9857) 2024-11-12T19:34:03,647 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/dd83f93e0b9544f88ef215cfdde861e8 2024-11-12T19:34:03,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742136_1312 (size=13391) 2024-11-12T19:34:03,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/e3321863bb8849c39115c7ab2f7ee574 is 50, key is test_row_0/C:col10/1731440042791/Put/seqid=0 2024-11-12T19:34:03,694 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/4a699c7a156e4631bb66458740c9ed55 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4a699c7a156e4631bb66458740c9ed55 2024-11-12T19:34:03,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742137_1313 (size=9857) 2024-11-12T19:34:03,723 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/C of 2056d7413c228b8ad5515802b19e3905 into 4a699c7a156e4631bb66458740c9ed55(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:34:03,723 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:03,723 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/C, priority=13, startTime=1731440043041; duration=0sec 2024-11-12T19:34:03,723 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:03,723 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:C 2024-11-12T19:34:03,819 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:03,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440103818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:03,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:03,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440103820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:03,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:03,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440103828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:04,004 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-12T19:34:04,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-12T19:34:04,116 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/e3321863bb8849c39115c7ab2f7ee574 2024-11-12T19:34:04,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/90fb624ca5cd4e9d993f1e54740b5bea as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/90fb624ca5cd4e9d993f1e54740b5bea 2024-11-12T19:34:04,143 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/90fb624ca5cd4e9d993f1e54740b5bea, entries=100, sequenceid=467, filesize=9.6 K 2024-11-12T19:34:04,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/dd83f93e0b9544f88ef215cfdde861e8 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/dd83f93e0b9544f88ef215cfdde861e8 2024-11-12T19:34:04,150 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/dd83f93e0b9544f88ef215cfdde861e8, entries=100, sequenceid=467, filesize=9.6 K 2024-11-12T19:34:04,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/e3321863bb8849c39115c7ab2f7ee574 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/e3321863bb8849c39115c7ab2f7ee574 2024-11-12T19:34:04,156 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/e3321863bb8849c39115c7ab2f7ee574, entries=100, sequenceid=467, filesize=9.6 K 2024-11-12T19:34:04,159 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): 
Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 2056d7413c228b8ad5515802b19e3905 in 1091ms, sequenceid=467, compaction requested=false 2024-11-12T19:34:04,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:04,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:04,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-12T19:34:04,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-12T19:34:04,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-12T19:34:04,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2540 sec 2024-11-12T19:34:04,173 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.2610 sec 2024-11-12T19:34:04,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:34:04,326 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-12T19:34:04,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:34:04,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:04,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:34:04,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:04,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:34:04,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:04,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:04,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440104354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:04,360 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/9dfba013b4bd4f4a9a9dfc59d0df796f is 50, key is test_row_0/A:col10/1731440043190/Put/seqid=0 2024-11-12T19:34:04,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:04,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440104355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:04,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:04,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 286 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440104359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:04,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742138_1314 (size=12301) 2024-11-12T19:34:04,401 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=498 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/9dfba013b4bd4f4a9a9dfc59d0df796f 2024-11-12T19:34:04,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/72199f537a514e768964752606f228b5 is 50, key is test_row_0/B:col10/1731440043190/Put/seqid=0 2024-11-12T19:34:04,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742139_1315 (size=12301) 2024-11-12T19:34:04,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:04,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440104463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:04,467 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=498 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/72199f537a514e768964752606f228b5 2024-11-12T19:34:04,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:04,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440104470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:04,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:04,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 288 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440104479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:04,481 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/0d846f84e52e4f429f2207f72cca88a4 is 50, key is test_row_0/C:col10/1731440043190/Put/seqid=0 2024-11-12T19:34:04,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742140_1316 (size=12301) 2024-11-12T19:34:04,670 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:04,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43360 deadline: 1731440104669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:04,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:04,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43386 deadline: 1731440104675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:04,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:04,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 290 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43362 deadline: 1731440104682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:04,907 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=498 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/0d846f84e52e4f429f2207f72cca88a4 2024-11-12T19:34:04,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/9dfba013b4bd4f4a9a9dfc59d0df796f as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/9dfba013b4bd4f4a9a9dfc59d0df796f 2024-11-12T19:34:04,920 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/9dfba013b4bd4f4a9a9dfc59d0df796f, entries=150, sequenceid=498, filesize=12.0 K 2024-11-12T19:34:04,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/72199f537a514e768964752606f228b5 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/72199f537a514e768964752606f228b5 2024-11-12T19:34:04,931 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/72199f537a514e768964752606f228b5, entries=150, sequenceid=498, filesize=12.0 K 2024-11-12T19:34:04,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/0d846f84e52e4f429f2207f72cca88a4 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/0d846f84e52e4f429f2207f72cca88a4 2024-11-12T19:34:04,936 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/0d846f84e52e4f429f2207f72cca88a4, entries=150, sequenceid=498, filesize=12.0 K 2024-11-12T19:34:04,937 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 2056d7413c228b8ad5515802b19e3905 in 611ms, sequenceid=498, compaction requested=true 2024-11-12T19:34:04,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:04,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:34:04,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:04,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:34:04,938 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:04,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:04,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2056d7413c228b8ad5515802b19e3905:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:34:04,938 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:04,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:04,939 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35549 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:04,939 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/A is initiating minor compaction (all files) 2024-11-12T19:34:04,939 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/A in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:34:04,939 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/8a73c3750aff4dd6be12fcc2034e473f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/90fb624ca5cd4e9d993f1e54740b5bea, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/9dfba013b4bd4f4a9a9dfc59d0df796f] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=34.7 K 2024-11-12T19:34:04,940 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35549 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:04,940 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a73c3750aff4dd6be12fcc2034e473f, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=458, earliestPutTs=1731440041657 2024-11-12T19:34:04,940 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/B is initiating minor compaction (all files) 2024-11-12T19:34:04,940 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/B in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:34:04,940 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/d6a0566228114d87ad47972c7f5e0405, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/dd83f93e0b9544f88ef215cfdde861e8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/72199f537a514e768964752606f228b5] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=34.7 K 2024-11-12T19:34:04,940 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90fb624ca5cd4e9d993f1e54740b5bea, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1731440042791 2024-11-12T19:34:04,940 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting d6a0566228114d87ad47972c7f5e0405, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=458, earliestPutTs=1731440041657 2024-11-12T19:34:04,941 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9dfba013b4bd4f4a9a9dfc59d0df796f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=498, earliestPutTs=1731440043190 2024-11-12T19:34:04,942 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting dd83f93e0b9544f88ef215cfdde861e8, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1731440042791 2024-11-12T19:34:04,942 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 72199f537a514e768964752606f228b5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=498, earliestPutTs=1731440043190 2024-11-12T19:34:04,957 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#B#compaction#270 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:04,957 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/13ef06f3ed294903b440299b72cf3317 is 50, key is test_row_0/B:col10/1731440043190/Put/seqid=0 2024-11-12T19:34:04,968 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#A#compaction#271 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:04,969 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/149bb56fa8e44da28fbc4644187f8d53 is 50, key is test_row_0/A:col10/1731440043190/Put/seqid=0 2024-11-12T19:34:04,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:34:04,991 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-12T19:34:04,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:34:04,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:04,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:34:04,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:04,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:34:04,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:04,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742141_1317 (size=13493) 2024-11-12T19:34:05,003 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/13ef06f3ed294903b440299b72cf3317 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/13ef06f3ed294903b440299b72cf3317 2024-11-12T19:34:05,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/1b771f345b5a45f2af5370e0f1ec0d37 is 50, key is test_row_0/A:col10/1731440044990/Put/seqid=0 2024-11-12T19:34:05,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-12T19:34:05,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742142_1318 (size=13493) 2024-11-12T19:34:05,019 INFO [Thread-1007 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-12T19:34:05,024 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:34:05,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-12T19:34:05,026 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:34:05,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-12T19:34:05,026 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:34:05,027 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:34:05,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742143_1319 (size=14741) 2024-11-12T19:34:05,058 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/B of 2056d7413c228b8ad5515802b19e3905 into 13ef06f3ed294903b440299b72cf3317(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:34:05,058 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:05,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=513 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/1b771f345b5a45f2af5370e0f1ec0d37 2024-11-12T19:34:05,058 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/B, priority=13, startTime=1731440044938; duration=0sec 2024-11-12T19:34:05,058 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:05,059 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:B 2024-11-12T19:34:05,059 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:05,066 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35549 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:05,066 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 2056d7413c228b8ad5515802b19e3905/C is initiating minor compaction (all files) 2024-11-12T19:34:05,066 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of 2056d7413c228b8ad5515802b19e3905/C in TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:05,067 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4a699c7a156e4631bb66458740c9ed55, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/e3321863bb8849c39115c7ab2f7ee574, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/0d846f84e52e4f429f2207f72cca88a4] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp, totalSize=34.7 K 2024-11-12T19:34:05,069 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a699c7a156e4631bb66458740c9ed55, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=458, earliestPutTs=1731440041657 2024-11-12T19:34:05,070 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting e3321863bb8849c39115c7ab2f7ee574, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1731440042791 2024-11-12T19:34:05,070 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d846f84e52e4f429f2207f72cca88a4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=498, earliestPutTs=1731440043190 2024-11-12T19:34:05,075 DEBUG [Thread-1016 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3be398a9 to 127.0.0.1:60358 2024-11-12T19:34:05,075 DEBUG [Thread-1016 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:05,075 DEBUG [Thread-1008 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f50b381 to 127.0.0.1:60358 2024-11-12T19:34:05,075 DEBUG [Thread-1008 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:05,075 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/bcaf2eed5bba45f39dfb74d77668e6d0 is 50, key is test_row_0/B:col10/1731440044990/Put/seqid=0 2024-11-12T19:34:05,078 DEBUG [Thread-1012 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x712d7bc3 to 127.0.0.1:60358 2024-11-12T19:34:05,078 DEBUG [Thread-1012 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:05,081 DEBUG [Thread-1001 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x195206da to 127.0.0.1:60358 2024-11-12T19:34:05,081 DEBUG [Thread-997 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3eec6530 to 127.0.0.1:60358 2024-11-12T19:34:05,081 DEBUG [Thread-1003 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x282318cf to 127.0.0.1:60358 2024-11-12T19:34:05,081 DEBUG [Thread-997 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:05,081 DEBUG [Thread-1001 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:05,081 DEBUG [Thread-1003 {}] ipc.AbstractRpcClient(514): Stopping rpc 
client 2024-11-12T19:34:05,083 DEBUG [Thread-1014 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x40da73c1 to 127.0.0.1:60358 2024-11-12T19:34:05,083 DEBUG [Thread-1014 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:05,084 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2056d7413c228b8ad5515802b19e3905#C#compaction#274 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:05,085 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/94141eaeb77d4395a65758a6bb16d186 is 50, key is test_row_0/C:col10/1731440043190/Put/seqid=0 2024-11-12T19:34:05,086 DEBUG [Thread-1010 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x124edab0 to 127.0.0.1:60358 2024-11-12T19:34:05,087 DEBUG [Thread-1010 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:05,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742145_1321 (size=13493) 2024-11-12T19:34:05,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742144_1320 (size=12301) 2024-11-12T19:34:05,115 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=513 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/bcaf2eed5bba45f39dfb74d77668e6d0 2024-11-12T19:34:05,123 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/413da3bbea9a4bfea763bf36f9d78133 is 50, key is test_row_0/C:col10/1731440044990/Put/seqid=0 2024-11-12T19:34:05,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-12T19:34:05,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742146_1322 (size=12301) 2024-11-12T19:34:05,179 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:05,179 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-12T19:34:05,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:05,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
as already flushing 2024-11-12T19:34:05,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:05,180 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:05,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:05,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:05,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-12T19:34:05,331 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:05,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-12T19:34:05,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:05,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:34:05,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:05,332 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:05,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:05,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:05,424 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/149bb56fa8e44da28fbc4644187f8d53 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/149bb56fa8e44da28fbc4644187f8d53 2024-11-12T19:34:05,428 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/A of 2056d7413c228b8ad5515802b19e3905 into 149bb56fa8e44da28fbc4644187f8d53(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:34:05,428 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:05,428 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/A, priority=13, startTime=1731440044938; duration=0sec 2024-11-12T19:34:05,428 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:05,428 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:A 2024-11-12T19:34:05,484 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:05,484 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-12T19:34:05,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:05,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. as already flushing 2024-11-12T19:34:05,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:05,485 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:05,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:05,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:05,506 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/94141eaeb77d4395a65758a6bb16d186 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/94141eaeb77d4395a65758a6bb16d186 2024-11-12T19:34:05,532 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=513 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/413da3bbea9a4bfea763bf36f9d78133 2024-11-12T19:34:05,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/1b771f345b5a45f2af5370e0f1ec0d37 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/1b771f345b5a45f2af5370e0f1ec0d37 2024-11-12T19:34:05,535 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2056d7413c228b8ad5515802b19e3905/C of 2056d7413c228b8ad5515802b19e3905 into 94141eaeb77d4395a65758a6bb16d186(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:34:05,536 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:05,536 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905., storeName=2056d7413c228b8ad5515802b19e3905/C, priority=13, startTime=1731440044938; duration=0sec 2024-11-12T19:34:05,536 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:05,536 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2056d7413c228b8ad5515802b19e3905:C 2024-11-12T19:34:05,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/1b771f345b5a45f2af5370e0f1ec0d37, entries=200, sequenceid=513, filesize=14.4 K 2024-11-12T19:34:05,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/bcaf2eed5bba45f39dfb74d77668e6d0 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/bcaf2eed5bba45f39dfb74d77668e6d0 2024-11-12T19:34:05,543 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/bcaf2eed5bba45f39dfb74d77668e6d0, entries=150, sequenceid=513, filesize=12.0 K 2024-11-12T19:34:05,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/413da3bbea9a4bfea763bf36f9d78133 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/413da3bbea9a4bfea763bf36f9d78133 2024-11-12T19:34:05,547 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/413da3bbea9a4bfea763bf36f9d78133, entries=150, sequenceid=513, filesize=12.0 K 2024-11-12T19:34:05,548 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=73.80 KB/75570 for 2056d7413c228b8ad5515802b19e3905 in 565ms, sequenceid=513, compaction requested=false 2024-11-12T19:34:05,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:05,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-12T19:34:05,637 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
81d69e608036,33067,1731439956493 2024-11-12T19:34:05,637 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-12T19:34:05,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:05,637 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-12T19:34:05,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:34:05,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:05,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:34:05,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:05,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:34:05,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:05,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/788ee78178f7479785586f45859a964e is 50, key is test_row_0/A:col10/1731440045071/Put/seqid=0 2024-11-12T19:34:05,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742147_1323 (size=12301) 2024-11-12T19:34:06,047 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/788ee78178f7479785586f45859a964e 2024-11-12T19:34:06,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/3abef304f2e34cd2a627fa513851f685 is 50, key is test_row_0/B:col10/1731440045071/Put/seqid=0 2024-11-12T19:34:06,057 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742148_1324 (size=12301) 2024-11-12T19:34:06,058 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/3abef304f2e34cd2a627fa513851f685 2024-11-12T19:34:06,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/c2b548b49b064ad8be88c52b4cdb7e18 is 50, key is test_row_0/C:col10/1731440045071/Put/seqid=0 2024-11-12T19:34:06,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742149_1325 (size=12301) 2024-11-12T19:34:06,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-12T19:34:06,469 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/c2b548b49b064ad8be88c52b4cdb7e18 2024-11-12T19:34:06,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/788ee78178f7479785586f45859a964e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/788ee78178f7479785586f45859a964e 2024-11-12T19:34:06,476 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/788ee78178f7479785586f45859a964e, entries=150, sequenceid=530, filesize=12.0 K 2024-11-12T19:34:06,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/3abef304f2e34cd2a627fa513851f685 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/3abef304f2e34cd2a627fa513851f685 2024-11-12T19:34:06,481 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/3abef304f2e34cd2a627fa513851f685, entries=150, sequenceid=530, filesize=12.0 K 2024-11-12T19:34:06,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/c2b548b49b064ad8be88c52b4cdb7e18 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/c2b548b49b064ad8be88c52b4cdb7e18 2024-11-12T19:34:06,485 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/c2b548b49b064ad8be88c52b4cdb7e18, entries=150, sequenceid=530, filesize=12.0 K 2024-11-12T19:34:06,486 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=0 B/0 for 2056d7413c228b8ad5515802b19e3905 in 849ms, sequenceid=530, compaction requested=true 2024-11-12T19:34:06,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:06,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:34:06,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-12T19:34:06,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-12T19:34:06,488 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-12T19:34:06,488 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4610 sec 2024-11-12T19:34:06,489 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.4640 sec 2024-11-12T19:34:07,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-12T19:34:07,131 INFO [Thread-1007 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-12T19:34:09,562 DEBUG [Thread-999 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6dc273c3 to 127.0.0.1:60358 2024-11-12T19:34:09,562 DEBUG [Thread-999 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:13,448 DEBUG [Thread-1005 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ea91426 to 127.0.0.1:60358 2024-11-12T19:34:13,448 DEBUG [Thread-1005 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:13,448 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-12T19:34:13,448 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 144 2024-11-12T19:34:13,448 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 22 2024-11-12T19:34:13,448 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 120 2024-11-12T19:34:13,448 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 130 2024-11-12T19:34:13,448 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 1 2024-11-12T19:34:13,448 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-12T19:34:13,448 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4175 2024-11-12T19:34:13,448 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4174 2024-11-12T19:34:13,448 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4126 2024-11-12T19:34:13,448 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4165 2024-11-12T19:34:13,448 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4184 2024-11-12T19:34:13,448 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-12T19:34:13,448 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-12T19:34:13,449 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x267e0963 to 127.0.0.1:60358 2024-11-12T19:34:13,449 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:13,449 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-12T19:34:13,449 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees 2024-11-12T19:34:13,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-12T19:34:13,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-12T19:34:13,452 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731440053452"}]},"ts":"1731440053452"} 2024-11-12T19:34:13,453 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-12T19:34:13,474 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-12T19:34:13,475 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-12T19:34:13,476 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=87, ppid=86, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2056d7413c228b8ad5515802b19e3905, UNASSIGN}] 2024-11-12T19:34:13,476 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2056d7413c228b8ad5515802b19e3905, UNASSIGN 2024-11-12T19:34:13,477 INFO [PEWorker-5 {}] 
assignment.RegionStateStore(202): pid=87 updating hbase:meta row=2056d7413c228b8ad5515802b19e3905, regionState=CLOSING, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:34:13,477 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-12T19:34:13,477 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; CloseRegionProcedure 2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493}] 2024-11-12T19:34:13,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-12T19:34:13,629 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:13,630 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] handler.UnassignRegionHandler(124): Close 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:34:13,630 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-12T19:34:13,630 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1681): Closing 2056d7413c228b8ad5515802b19e3905, disabling compactions & flushes 2024-11-12T19:34:13,630 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:13,630 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 2024-11-12T19:34:13,630 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. after waiting 0 ms 2024-11-12T19:34:13,630 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:34:13,630 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(2837): Flushing 2056d7413c228b8ad5515802b19e3905 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-11-12T19:34:13,630 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=A 2024-11-12T19:34:13,630 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:13,630 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=B 2024-11-12T19:34:13,630 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:13,630 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2056d7413c228b8ad5515802b19e3905, store=C 2024-11-12T19:34:13,630 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:13,634 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/de2d24f241234f28ab81d79723832f55 is 50, key is test_row_0/A:col10/1731440049561/Put/seqid=0 2024-11-12T19:34:13,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742150_1326 (size=9857) 2024-11-12T19:34:13,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-12T19:34:13,951 ERROR [LeaseRenewer:jenkins.hfs.0@localhost:41367 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins.hfs.0@localhost:41367,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:14,038 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=535 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/de2d24f241234f28ab81d79723832f55 2024-11-12T19:34:14,046 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/680b8d381ef74491964237bfc529c96c is 50, key is test_row_0/B:col10/1731440049561/Put/seqid=0 2024-11-12T19:34:14,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742151_1327 (size=9857) 2024-11-12T19:34:14,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-12T19:34:14,452 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=535 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/680b8d381ef74491964237bfc529c96c 2024-11-12T19:34:14,458 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/eddc59edc22348fda9c1c200849e248e is 50, key is test_row_0/C:col10/1731440049561/Put/seqid=0 2024-11-12T19:34:14,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742152_1328 (size=9857) 2024-11-12T19:34:14,463 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=535 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/eddc59edc22348fda9c1c200849e248e 2024-11-12T19:34:14,466 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/A/de2d24f241234f28ab81d79723832f55 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/de2d24f241234f28ab81d79723832f55 2024-11-12T19:34:14,469 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/de2d24f241234f28ab81d79723832f55, entries=100, sequenceid=535, filesize=9.6 K 
2024-11-12T19:34:14,469 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/B/680b8d381ef74491964237bfc529c96c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/680b8d381ef74491964237bfc529c96c 2024-11-12T19:34:14,472 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/680b8d381ef74491964237bfc529c96c, entries=100, sequenceid=535, filesize=9.6 K 2024-11-12T19:34:14,472 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/.tmp/C/eddc59edc22348fda9c1c200849e248e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/eddc59edc22348fda9c1c200849e248e 2024-11-12T19:34:14,475 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/eddc59edc22348fda9c1c200849e248e, entries=100, sequenceid=535, filesize=9.6 K 2024-11-12T19:34:14,476 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=0 B/0 for 2056d7413c228b8ad5515802b19e3905 in 846ms, sequenceid=535, compaction requested=true 2024-11-12T19:34:14,476 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/563394327a2c405d979be5aabb4f7d06, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/29fe637f2dbc4f6596b76e90096b3658, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/bbd0cecd40074543b557d61a279e7885, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/a29264486d3640a5b416603a59a166c7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/fc7d4fb1157e4af8bac77ad423ddd6c7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/a0fc1b462ea84a2d9f559ed106a7a982, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/8486c2ed5a4f4e088264f7f7a341571b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/01ce0cf6e7e744ee8adb46c5d8e66698, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/4b4739fe7a384dafae478747a2993eca, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/c84a0d8c959947898a36393abb5f6f7e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/ea1c2b9a7a2e4188b2d66dfea1fa7b65, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/7c26b9ecc66b48148252698704ed7f3e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/7316216055ae41498cf6e5a4dd2b84bc, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/55f63bac7a464269a6e23ce5335d095f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/66f93090fbca43b38d4ec3940b369550, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/16b773112e594fef83e4734031b48f75, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/552b917d8f2c40d5a7d331816ccb4a98, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/354292babc214e2cb0000de8849a6a2c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/71255dd027b14619804cbfb53c5b53b0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/5f5d9763adee416ab41b0e549714f351, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/76c39709948348b3bad7f05952f96e69, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/38fa1e220ae94634bcaba5e694112689, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/2407d4e5a236466aa4acb78d5efec81b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/e476abf9ce444703b0fc29a810499ae4, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/46b1e8c3355b4f9b8bfe649b22b2a328, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/1cbd3c2b192f4464a58275ab62967011, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/0ea8464259764de28f70d34b56767d5e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/da2d3d914c1a439f8b35a74a540c03b2, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/abeaf7100cac480fa3cd0c0b89754e0f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/325979bc74a741d384baeb8dd4a06c97, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/2b63c346b78846bea19675acec4eb29f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/8a73c3750aff4dd6be12fcc2034e473f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/6242dff7f5304703ba89659a23178475, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/90fb624ca5cd4e9d993f1e54740b5bea, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/9dfba013b4bd4f4a9a9dfc59d0df796f] to archive 2024-11-12T19:34:14,477 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-12T19:34:14,479 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/563394327a2c405d979be5aabb4f7d06 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/563394327a2c405d979be5aabb4f7d06 2024-11-12T19:34:14,480 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/29fe637f2dbc4f6596b76e90096b3658 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/29fe637f2dbc4f6596b76e90096b3658 2024-11-12T19:34:14,481 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/bbd0cecd40074543b557d61a279e7885 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/bbd0cecd40074543b557d61a279e7885 2024-11-12T19:34:14,482 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/a29264486d3640a5b416603a59a166c7 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/a29264486d3640a5b416603a59a166c7 2024-11-12T19:34:14,483 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/fc7d4fb1157e4af8bac77ad423ddd6c7 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/fc7d4fb1157e4af8bac77ad423ddd6c7 2024-11-12T19:34:14,484 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/a0fc1b462ea84a2d9f559ed106a7a982 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/a0fc1b462ea84a2d9f559ed106a7a982 2024-11-12T19:34:14,486 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/8486c2ed5a4f4e088264f7f7a341571b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/8486c2ed5a4f4e088264f7f7a341571b 2024-11-12T19:34:14,487 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/01ce0cf6e7e744ee8adb46c5d8e66698 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/01ce0cf6e7e744ee8adb46c5d8e66698 2024-11-12T19:34:14,488 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/4b4739fe7a384dafae478747a2993eca to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/4b4739fe7a384dafae478747a2993eca 2024-11-12T19:34:14,488 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/c84a0d8c959947898a36393abb5f6f7e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/c84a0d8c959947898a36393abb5f6f7e 2024-11-12T19:34:14,489 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/ea1c2b9a7a2e4188b2d66dfea1fa7b65 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/ea1c2b9a7a2e4188b2d66dfea1fa7b65 2024-11-12T19:34:14,490 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/7c26b9ecc66b48148252698704ed7f3e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/7c26b9ecc66b48148252698704ed7f3e 2024-11-12T19:34:14,491 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/7316216055ae41498cf6e5a4dd2b84bc to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/7316216055ae41498cf6e5a4dd2b84bc 2024-11-12T19:34:14,492 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/55f63bac7a464269a6e23ce5335d095f to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/55f63bac7a464269a6e23ce5335d095f 2024-11-12T19:34:14,493 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/66f93090fbca43b38d4ec3940b369550 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/66f93090fbca43b38d4ec3940b369550 2024-11-12T19:34:14,493 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/16b773112e594fef83e4734031b48f75 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/16b773112e594fef83e4734031b48f75 2024-11-12T19:34:14,494 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/552b917d8f2c40d5a7d331816ccb4a98 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/552b917d8f2c40d5a7d331816ccb4a98 2024-11-12T19:34:14,495 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/354292babc214e2cb0000de8849a6a2c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/354292babc214e2cb0000de8849a6a2c 2024-11-12T19:34:14,496 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/71255dd027b14619804cbfb53c5b53b0 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/71255dd027b14619804cbfb53c5b53b0 2024-11-12T19:34:14,497 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/5f5d9763adee416ab41b0e549714f351 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/5f5d9763adee416ab41b0e549714f351 2024-11-12T19:34:14,497 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/76c39709948348b3bad7f05952f96e69 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/76c39709948348b3bad7f05952f96e69 2024-11-12T19:34:14,498 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/38fa1e220ae94634bcaba5e694112689 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/38fa1e220ae94634bcaba5e694112689 2024-11-12T19:34:14,500 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/2407d4e5a236466aa4acb78d5efec81b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/2407d4e5a236466aa4acb78d5efec81b 2024-11-12T19:34:14,501 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/e476abf9ce444703b0fc29a810499ae4 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/e476abf9ce444703b0fc29a810499ae4 2024-11-12T19:34:14,501 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/46b1e8c3355b4f9b8bfe649b22b2a328 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/46b1e8c3355b4f9b8bfe649b22b2a328 2024-11-12T19:34:14,502 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/1cbd3c2b192f4464a58275ab62967011 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/1cbd3c2b192f4464a58275ab62967011 2024-11-12T19:34:14,503 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/0ea8464259764de28f70d34b56767d5e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/0ea8464259764de28f70d34b56767d5e 2024-11-12T19:34:14,504 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/da2d3d914c1a439f8b35a74a540c03b2 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/da2d3d914c1a439f8b35a74a540c03b2 2024-11-12T19:34:14,505 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/abeaf7100cac480fa3cd0c0b89754e0f to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/abeaf7100cac480fa3cd0c0b89754e0f 2024-11-12T19:34:14,506 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/325979bc74a741d384baeb8dd4a06c97 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/325979bc74a741d384baeb8dd4a06c97 2024-11-12T19:34:14,506 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/2b63c346b78846bea19675acec4eb29f to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/2b63c346b78846bea19675acec4eb29f 2024-11-12T19:34:14,507 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/8a73c3750aff4dd6be12fcc2034e473f to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/8a73c3750aff4dd6be12fcc2034e473f 2024-11-12T19:34:14,508 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/6242dff7f5304703ba89659a23178475 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/6242dff7f5304703ba89659a23178475 2024-11-12T19:34:14,509 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/90fb624ca5cd4e9d993f1e54740b5bea to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/90fb624ca5cd4e9d993f1e54740b5bea 2024-11-12T19:34:14,510 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/9dfba013b4bd4f4a9a9dfc59d0df796f to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/9dfba013b4bd4f4a9a9dfc59d0df796f 2024-11-12T19:34:14,511 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/7c8c9b1276494de7b82515d50e9a8f84, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/ab679382c9a74b67ab586367e8c25c07, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/2f29f12c9c794157b871e3482ff723be, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/e1ed743c73c34f17bc1df4ade133ae40, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/96aafb17fec84cf1b0314dbec39d2819, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/882eab8c227e42d39df6e9efb00e922a, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/02c0a87c93474e8bb56e5e1e37819086, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/6119d70657c34f43ab9576cd16ab00cd, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/29c501b02c134832a8b0909edb620961, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/6a318b88e728467ebd01d2d69b49ca94, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/352d75fcd3cf4763b470365a8ac0b93c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/1206ccb9f3144c7eb6bfd068095644e6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/f257381a1cd2409a891ad45d695d0330, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/6bf35f21e27142949f95258df160c6b7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/90dc3958016e48de99b1d21ed9468dbd, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/30a5b0263cc741619d882e6b19b455d3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/eb7df9ac45ca4c2fb8643b7243d4a060, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/502ad8ba7db0403989eafb5c27cbbe59, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/323b80984db74ae080d7a7fa4c4d8358, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/912c9489745348eea6570b5708f07e37, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/30edcad58c5141d2b2e49fbb3b028dde, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/4b2d38bec7234cf3904f0d9fd0ccd3b0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/4c26c42db17c47a885d4b371c7fe9fab, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/460eefb08489407d92372a58b772116e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/747e0431fbc4411c9a6c4768184ee49d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/1d14764f193d4b159ba3324dc13a3134, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/e9778361edaf4ab5b6db4ab328775883, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/8c23d4fe93614b028e07470f42bdbaf3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/d07a18b5cb424574b796ab6650dddbdc, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/f0a1dcffef80448886b432fd526a69fc, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/7909536534ec4774a6920d4ad7ebd834, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/d6a0566228114d87ad47972c7f5e0405, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/9c7f8cd0b2b34bbd8be7d77643afcd11, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/dd83f93e0b9544f88ef215cfdde861e8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/72199f537a514e768964752606f228b5] to archive 2024-11-12T19:34:14,512 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-12T19:34:14,514 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/7c8c9b1276494de7b82515d50e9a8f84 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/7c8c9b1276494de7b82515d50e9a8f84 2024-11-12T19:34:14,515 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/ab679382c9a74b67ab586367e8c25c07 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/ab679382c9a74b67ab586367e8c25c07 2024-11-12T19:34:14,516 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/2f29f12c9c794157b871e3482ff723be to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/2f29f12c9c794157b871e3482ff723be 2024-11-12T19:34:14,517 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/e1ed743c73c34f17bc1df4ade133ae40 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/e1ed743c73c34f17bc1df4ade133ae40 2024-11-12T19:34:14,519 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/96aafb17fec84cf1b0314dbec39d2819 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/96aafb17fec84cf1b0314dbec39d2819 2024-11-12T19:34:14,520 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/882eab8c227e42d39df6e9efb00e922a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/882eab8c227e42d39df6e9efb00e922a 2024-11-12T19:34:14,521 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/02c0a87c93474e8bb56e5e1e37819086 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/02c0a87c93474e8bb56e5e1e37819086 2024-11-12T19:34:14,521 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/6119d70657c34f43ab9576cd16ab00cd to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/6119d70657c34f43ab9576cd16ab00cd 2024-11-12T19:34:14,522 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/29c501b02c134832a8b0909edb620961 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/29c501b02c134832a8b0909edb620961 2024-11-12T19:34:14,523 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/6a318b88e728467ebd01d2d69b49ca94 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/6a318b88e728467ebd01d2d69b49ca94 2024-11-12T19:34:14,524 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/352d75fcd3cf4763b470365a8ac0b93c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/352d75fcd3cf4763b470365a8ac0b93c 2024-11-12T19:34:14,525 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/1206ccb9f3144c7eb6bfd068095644e6 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/1206ccb9f3144c7eb6bfd068095644e6 2024-11-12T19:34:14,526 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/f257381a1cd2409a891ad45d695d0330 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/f257381a1cd2409a891ad45d695d0330 2024-11-12T19:34:14,527 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/6bf35f21e27142949f95258df160c6b7 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/6bf35f21e27142949f95258df160c6b7 2024-11-12T19:34:14,528 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/90dc3958016e48de99b1d21ed9468dbd to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/90dc3958016e48de99b1d21ed9468dbd 2024-11-12T19:34:14,529 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/30a5b0263cc741619d882e6b19b455d3 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/30a5b0263cc741619d882e6b19b455d3 2024-11-12T19:34:14,530 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/eb7df9ac45ca4c2fb8643b7243d4a060 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/eb7df9ac45ca4c2fb8643b7243d4a060 2024-11-12T19:34:14,531 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/502ad8ba7db0403989eafb5c27cbbe59 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/502ad8ba7db0403989eafb5c27cbbe59 2024-11-12T19:34:14,532 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/323b80984db74ae080d7a7fa4c4d8358 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/323b80984db74ae080d7a7fa4c4d8358 2024-11-12T19:34:14,533 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/912c9489745348eea6570b5708f07e37 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/912c9489745348eea6570b5708f07e37 2024-11-12T19:34:14,534 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/30edcad58c5141d2b2e49fbb3b028dde to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/30edcad58c5141d2b2e49fbb3b028dde 2024-11-12T19:34:14,535 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/4b2d38bec7234cf3904f0d9fd0ccd3b0 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/4b2d38bec7234cf3904f0d9fd0ccd3b0 2024-11-12T19:34:14,536 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/4c26c42db17c47a885d4b371c7fe9fab to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/4c26c42db17c47a885d4b371c7fe9fab 2024-11-12T19:34:14,536 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/460eefb08489407d92372a58b772116e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/460eefb08489407d92372a58b772116e 2024-11-12T19:34:14,537 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/747e0431fbc4411c9a6c4768184ee49d to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/747e0431fbc4411c9a6c4768184ee49d 2024-11-12T19:34:14,538 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/1d14764f193d4b159ba3324dc13a3134 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/1d14764f193d4b159ba3324dc13a3134 2024-11-12T19:34:14,539 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/e9778361edaf4ab5b6db4ab328775883 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/e9778361edaf4ab5b6db4ab328775883 2024-11-12T19:34:14,540 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/8c23d4fe93614b028e07470f42bdbaf3 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/8c23d4fe93614b028e07470f42bdbaf3 2024-11-12T19:34:14,540 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/d07a18b5cb424574b796ab6650dddbdc to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/d07a18b5cb424574b796ab6650dddbdc 2024-11-12T19:34:14,541 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/f0a1dcffef80448886b432fd526a69fc to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/f0a1dcffef80448886b432fd526a69fc 2024-11-12T19:34:14,542 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/7909536534ec4774a6920d4ad7ebd834 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/7909536534ec4774a6920d4ad7ebd834 2024-11-12T19:34:14,543 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/d6a0566228114d87ad47972c7f5e0405 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/d6a0566228114d87ad47972c7f5e0405 2024-11-12T19:34:14,543 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/9c7f8cd0b2b34bbd8be7d77643afcd11 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/9c7f8cd0b2b34bbd8be7d77643afcd11 2024-11-12T19:34:14,544 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/dd83f93e0b9544f88ef215cfdde861e8 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/dd83f93e0b9544f88ef215cfdde861e8 2024-11-12T19:34:14,545 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/72199f537a514e768964752606f228b5 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/72199f537a514e768964752606f228b5 2024-11-12T19:34:14,547 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/c4f4ddb5bda64042bb3b21f330a87052, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4622a2409e6145daa09a687cde406b1d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/3def31eb4642475d9c1add240d114640, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/a807fcc86e344abe9754fae2291d2740, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/0a26f59694694522a3d9611756a4b85c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b953a9cf691c4baebb8b442f5374ba22, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/3d994e13dc094399afb108ff59d9a771, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/adc2b80fd13f49d38d849963b1a18aec, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/bd2c8368d47b4e36a2d8ffebc9db75c6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/6f7509f72aec4597af9b7584eeaa89fa, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/0a740b701f1f4e219e65293d620a6175, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/eb7acee2d05f4c4e9f63414f7a57206f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4d5e1fc91db64b74843c2595899cabee, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/9ef267fcfeb44b1e97ff5d492630de91, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/96d305ddbd3b457cb76171023ab28fd1, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/065c4b7ad09b481d888b4ca644d85230, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/7a81437737964b48976615c5764470da, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b41972da923c48bf959b1790a71e2575, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/a64fe4fffad046a09a5e4c05a8e99ef8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b1a1a008106c43548d20d988ae355ffe, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/18c0239373d04bb9bad0970a06ecbd91, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b43dc3f886b34a4eb48e3367ab2486b0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/9c3a18f25e824e27b711d2bc4c623171, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b0bca428b3764514abba55d48aad2d1e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/6693fc26369045b78754f64b218b67b3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/09ab7e2c4874484782011536f0d62206, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/f70050de1d7e491fb0ba6b2cce73a62a, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/d9bb813668544ebabab62435e14f4996, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/09151b155b2d40839f06c46c361840d3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/7b01e669af324e50b9d0ceb8778a4655, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/1366d4eef3e444e0a83c45e07db7f78e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4a699c7a156e4631bb66458740c9ed55, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4d21671568fd416989d6c7bdfa26cd4b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/e3321863bb8849c39115c7ab2f7ee574, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/0d846f84e52e4f429f2207f72cca88a4] to archive 2024-11-12T19:34:14,548 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-12T19:34:14,549 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/c4f4ddb5bda64042bb3b21f330a87052 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/c4f4ddb5bda64042bb3b21f330a87052 2024-11-12T19:34:14,550 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4622a2409e6145daa09a687cde406b1d to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4622a2409e6145daa09a687cde406b1d 2024-11-12T19:34:14,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-12T19:34:14,555 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/3def31eb4642475d9c1add240d114640 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/3def31eb4642475d9c1add240d114640 2024-11-12T19:34:14,557 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/a807fcc86e344abe9754fae2291d2740 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/a807fcc86e344abe9754fae2291d2740 2024-11-12T19:34:14,558 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/0a26f59694694522a3d9611756a4b85c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/0a26f59694694522a3d9611756a4b85c 2024-11-12T19:34:14,559 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b953a9cf691c4baebb8b442f5374ba22 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b953a9cf691c4baebb8b442f5374ba22 2024-11-12T19:34:14,560 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/3d994e13dc094399afb108ff59d9a771 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/3d994e13dc094399afb108ff59d9a771 2024-11-12T19:34:14,561 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/adc2b80fd13f49d38d849963b1a18aec to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/adc2b80fd13f49d38d849963b1a18aec 2024-11-12T19:34:14,562 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/bd2c8368d47b4e36a2d8ffebc9db75c6 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/bd2c8368d47b4e36a2d8ffebc9db75c6 2024-11-12T19:34:14,565 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/6f7509f72aec4597af9b7584eeaa89fa to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/6f7509f72aec4597af9b7584eeaa89fa 2024-11-12T19:34:14,570 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/0a740b701f1f4e219e65293d620a6175 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/0a740b701f1f4e219e65293d620a6175 2024-11-12T19:34:14,572 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/eb7acee2d05f4c4e9f63414f7a57206f to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/eb7acee2d05f4c4e9f63414f7a57206f 2024-11-12T19:34:14,573 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4d5e1fc91db64b74843c2595899cabee to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4d5e1fc91db64b74843c2595899cabee 2024-11-12T19:34:14,574 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/9ef267fcfeb44b1e97ff5d492630de91 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/9ef267fcfeb44b1e97ff5d492630de91 2024-11-12T19:34:14,575 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/96d305ddbd3b457cb76171023ab28fd1 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/96d305ddbd3b457cb76171023ab28fd1 2024-11-12T19:34:14,576 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/065c4b7ad09b481d888b4ca644d85230 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/065c4b7ad09b481d888b4ca644d85230 2024-11-12T19:34:14,577 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/7a81437737964b48976615c5764470da to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/7a81437737964b48976615c5764470da 2024-11-12T19:34:14,579 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b41972da923c48bf959b1790a71e2575 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b41972da923c48bf959b1790a71e2575 2024-11-12T19:34:14,581 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/a64fe4fffad046a09a5e4c05a8e99ef8 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/a64fe4fffad046a09a5e4c05a8e99ef8 2024-11-12T19:34:14,582 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b1a1a008106c43548d20d988ae355ffe to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b1a1a008106c43548d20d988ae355ffe 2024-11-12T19:34:14,584 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/18c0239373d04bb9bad0970a06ecbd91 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/18c0239373d04bb9bad0970a06ecbd91 2024-11-12T19:34:14,585 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b43dc3f886b34a4eb48e3367ab2486b0 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b43dc3f886b34a4eb48e3367ab2486b0 2024-11-12T19:34:14,590 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/9c3a18f25e824e27b711d2bc4c623171 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/9c3a18f25e824e27b711d2bc4c623171 2024-11-12T19:34:14,593 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b0bca428b3764514abba55d48aad2d1e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/b0bca428b3764514abba55d48aad2d1e 2024-11-12T19:34:14,597 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/6693fc26369045b78754f64b218b67b3 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/6693fc26369045b78754f64b218b67b3 2024-11-12T19:34:14,599 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/09ab7e2c4874484782011536f0d62206 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/09ab7e2c4874484782011536f0d62206 2024-11-12T19:34:14,600 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/f70050de1d7e491fb0ba6b2cce73a62a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/f70050de1d7e491fb0ba6b2cce73a62a 2024-11-12T19:34:14,601 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/d9bb813668544ebabab62435e14f4996 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/d9bb813668544ebabab62435e14f4996 2024-11-12T19:34:14,602 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/09151b155b2d40839f06c46c361840d3 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/09151b155b2d40839f06c46c361840d3 2024-11-12T19:34:14,608 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/7b01e669af324e50b9d0ceb8778a4655 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/7b01e669af324e50b9d0ceb8778a4655 2024-11-12T19:34:14,611 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/1366d4eef3e444e0a83c45e07db7f78e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/1366d4eef3e444e0a83c45e07db7f78e 2024-11-12T19:34:14,612 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4a699c7a156e4631bb66458740c9ed55 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4a699c7a156e4631bb66458740c9ed55 2024-11-12T19:34:14,614 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4d21671568fd416989d6c7bdfa26cd4b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/4d21671568fd416989d6c7bdfa26cd4b 2024-11-12T19:34:14,615 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/e3321863bb8849c39115c7ab2f7ee574 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/e3321863bb8849c39115c7ab2f7ee574 2024-11-12T19:34:14,617 DEBUG [StoreCloser-TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/0d846f84e52e4f429f2207f72cca88a4 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/0d846f84e52e4f429f2207f72cca88a4 2024-11-12T19:34:14,627 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/recovered.edits/538.seqid, newMaxSeqId=538, maxSeqId=1 2024-11-12T19:34:14,629 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905. 
2024-11-12T19:34:14,629 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1635): Region close journal for 2056d7413c228b8ad5515802b19e3905: 2024-11-12T19:34:14,631 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] handler.UnassignRegionHandler(170): Closed 2056d7413c228b8ad5515802b19e3905 2024-11-12T19:34:14,631 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=87 updating hbase:meta row=2056d7413c228b8ad5515802b19e3905, regionState=CLOSED 2024-11-12T19:34:14,634 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-12T19:34:14,634 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; CloseRegionProcedure 2056d7413c228b8ad5515802b19e3905, server=81d69e608036,33067,1731439956493 in 1.1550 sec 2024-11-12T19:34:14,635 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=87, resume processing ppid=86 2024-11-12T19:34:14,635 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, ppid=86, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2056d7413c228b8ad5515802b19e3905, UNASSIGN in 1.1580 sec 2024-11-12T19:34:14,639 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-12T19:34:14,639 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.1620 sec 2024-11-12T19:34:14,647 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731440054647"}]},"ts":"1731440054647"} 2024-11-12T19:34:14,648 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-12T19:34:14,657 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-12T19:34:14,659 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.2080 sec 2024-11-12T19:34:15,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-12T19:34:15,556 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-12T19:34:15,557 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-11-12T19:34:15,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:34:15,558 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=89, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:34:15,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-12T19:34:15,559 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=89, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:34:15,562 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905 2024-11-12T19:34:15,564 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A, FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B, FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C, FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/recovered.edits] 2024-11-12T19:34:15,566 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/149bb56fa8e44da28fbc4644187f8d53 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/149bb56fa8e44da28fbc4644187f8d53 2024-11-12T19:34:15,567 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/1b771f345b5a45f2af5370e0f1ec0d37 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/1b771f345b5a45f2af5370e0f1ec0d37 2024-11-12T19:34:15,568 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/788ee78178f7479785586f45859a964e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/788ee78178f7479785586f45859a964e 2024-11-12T19:34:15,569 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/de2d24f241234f28ab81d79723832f55 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/A/de2d24f241234f28ab81d79723832f55 2024-11-12T19:34:15,571 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/13ef06f3ed294903b440299b72cf3317 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/13ef06f3ed294903b440299b72cf3317 2024-11-12T19:34:15,572 
DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/3abef304f2e34cd2a627fa513851f685 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/3abef304f2e34cd2a627fa513851f685 2024-11-12T19:34:15,573 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/680b8d381ef74491964237bfc529c96c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/680b8d381ef74491964237bfc529c96c 2024-11-12T19:34:15,573 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/bcaf2eed5bba45f39dfb74d77668e6d0 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/B/bcaf2eed5bba45f39dfb74d77668e6d0 2024-11-12T19:34:15,575 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/413da3bbea9a4bfea763bf36f9d78133 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/413da3bbea9a4bfea763bf36f9d78133 2024-11-12T19:34:15,576 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/94141eaeb77d4395a65758a6bb16d186 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/94141eaeb77d4395a65758a6bb16d186 2024-11-12T19:34:15,577 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/c2b548b49b064ad8be88c52b4cdb7e18 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/c2b548b49b064ad8be88c52b4cdb7e18 2024-11-12T19:34:15,578 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/eddc59edc22348fda9c1c200849e248e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/C/eddc59edc22348fda9c1c200849e248e 2024-11-12T19:34:15,581 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/recovered.edits/538.seqid to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905/recovered.edits/538.seqid 2024-11-12T19:34:15,581 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/2056d7413c228b8ad5515802b19e3905 2024-11-12T19:34:15,581 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-12T19:34:15,583 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=89, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:34:15,586 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-12T19:34:15,588 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-12T19:34:15,589 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=89, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:34:15,589 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-12T19:34:15,589 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731440055589"}]},"ts":"9223372036854775807"} 2024-11-12T19:34:15,591 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-12T19:34:15,591 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 2056d7413c228b8ad5515802b19e3905, NAME => 'TestAcidGuarantees,,1731440022712.2056d7413c228b8ad5515802b19e3905.', STARTKEY => '', ENDKEY => ''}] 2024-11-12T19:34:15,591 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-12T19:34:15,591 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731440055591"}]},"ts":"9223372036854775807"} 2024-11-12T19:34:15,592 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-12T19:34:15,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-12T19:34:15,712 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=89, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:34:15,714 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 155 msec 2024-11-12T19:34:15,745 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-12T19:34:15,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-12T19:34:15,861 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-12T19:34:15,873 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=235 (was 240), OpenFileDescriptor=447 (was 461), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1187 (was 1503), ProcessCount=11 (was 11), AvailableMemoryMB=1033 (was 1343) 2024-11-12T19:34:15,885 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=235, OpenFileDescriptor=447, MaxFileDescriptor=1048576, SystemLoadAverage=1187, ProcessCount=11, AvailableMemoryMB=1033 2024-11-12T19:34:15,886 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
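The entries above trace the master-side DisableTableProcedure (pid=85) and DeleteTableProcedure (pid=89) that tear down TestAcidGuarantees between test methods. A minimal client-side sketch of the same operation, assuming an HBase 2.x configuration on the classpath; the class and variable names below are illustrative, not taken from the test itself:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(tn)) {
            // A table must be disabled before it can be deleted.
            if (admin.isTableEnabled(tn)) {
              admin.disableTable(tn);
            }
            admin.deleteTable(tn);
          }
        }
      }
    }

Each call blocks until the corresponding master procedure reports SUCCESS, which is why the log shows the client repeatedly asking MasterRpcServices "Checking to see if procedure is done" before HBaseAdmin$TableFuture reports the DISABLE and DELETE operations as completed.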
2024-11-12T19:34:15,886 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T19:34:15,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=90, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-12T19:34:15,888 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=90, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T19:34:15,888 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:15,888 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 90 2024-11-12T19:34:15,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-11-12T19:34:15,889 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=90, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T19:34:15,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742153_1329 (size=960) 2024-11-12T19:34:15,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-11-12T19:34:16,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-11-12T19:34:16,298 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8 2024-11-12T19:34:16,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742154_1330 (size=53) 2024-11-12T19:34:16,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-11-12T19:34:16,706 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:34:16,706 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 758f25fb434410405582dc106004e936, disabling compactions & flushes 2024-11-12T19:34:16,706 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:16,706 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:16,706 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. after waiting 0 ms 2024-11-12T19:34:16,707 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:16,707 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:16,707 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:16,709 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=90, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T19:34:16,709 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1731440056709"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731440056709"}]},"ts":"1731440056709"} 2024-11-12T19:34:16,711 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-12T19:34:16,712 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=90, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T19:34:16,713 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731440056713"}]},"ts":"1731440056713"} 2024-11-12T19:34:16,714 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-12T19:34:16,761 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=758f25fb434410405582dc106004e936, ASSIGN}] 2024-11-12T19:34:16,762 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=758f25fb434410405582dc106004e936, ASSIGN 2024-11-12T19:34:16,763 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=758f25fb434410405582dc106004e936, ASSIGN; state=OFFLINE, location=81d69e608036,33067,1731439956493; forceNewPlan=false, retain=false 2024-11-12T19:34:16,913 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=758f25fb434410405582dc106004e936, regionState=OPENING, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:34:16,915 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; OpenRegionProcedure 758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493}] 2024-11-12T19:34:16,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-11-12T19:34:17,068 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:17,070 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:17,070 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(7285): Opening region: {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} 2024-11-12T19:34:17,071 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 758f25fb434410405582dc106004e936 2024-11-12T19:34:17,071 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:34:17,071 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(7327): checking encryption for 758f25fb434410405582dc106004e936 2024-11-12T19:34:17,071 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(7330): checking classloading for 758f25fb434410405582dc106004e936 2024-11-12T19:34:17,072 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 758f25fb434410405582dc106004e936 2024-11-12T19:34:17,073 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:34:17,073 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 758f25fb434410405582dc106004e936 columnFamilyName A 2024-11-12T19:34:17,073 DEBUG [StoreOpener-758f25fb434410405582dc106004e936-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:17,074 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] regionserver.HStore(327): Store=758f25fb434410405582dc106004e936/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:34:17,074 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 758f25fb434410405582dc106004e936 2024-11-12T19:34:17,074 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:34:17,075 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 758f25fb434410405582dc106004e936 columnFamilyName B 2024-11-12T19:34:17,075 DEBUG [StoreOpener-758f25fb434410405582dc106004e936-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:17,075 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] regionserver.HStore(327): Store=758f25fb434410405582dc106004e936/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:34:17,075 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 758f25fb434410405582dc106004e936 2024-11-12T19:34:17,076 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:34:17,076 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 758f25fb434410405582dc106004e936 columnFamilyName C 2024-11-12T19:34:17,076 DEBUG [StoreOpener-758f25fb434410405582dc106004e936-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:17,077 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] regionserver.HStore(327): Store=758f25fb434410405582dc106004e936/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:34:17,077 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:17,077 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936 2024-11-12T19:34:17,078 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936 2024-11-12T19:34:17,079 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-12T19:34:17,080 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(1085): writing seq id for 758f25fb434410405582dc106004e936 2024-11-12T19:34:17,082 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T19:34:17,082 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(1102): Opened 758f25fb434410405582dc106004e936; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64751950, jitterRate=-0.035120755434036255}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T19:34:17,083 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(1001): Region open journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:17,083 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., pid=92, masterSystemTime=1731440057067 2024-11-12T19:34:17,085 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:17,085 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
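The preceding entries show CreateTableProcedure (pid=90) writing the FS layout and opening region 758f25fb434410405582dc106004e936 for the new TestAcidGuarantees table with families A, B and C, a BASIC compacting memstore, and a deliberately small MEMSTORE_FLUSHSIZE. A rough client-side equivalent, assuming an already-connected Admin handle named admin; only the descriptor options visible in the create request above are set, everything else is left at defaults (sketch only, not the test's actual setup code):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTableSketch {
      // Creates a three-family table roughly matching the descriptor in the log.
      static void createTable(Admin admin) throws IOException {
        TableDescriptorBuilder tdb =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // In-memory compaction policy, as in TABLE_ATTRIBUTES above.
                .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
                // 128 KB flush size -- intentionally tiny, hence the checker warning.
                .setMemStoreFlushSize(128 * 1024L);
        for (String family : new String[] {"A", "B", "C"}) {
          tdb.setColumnFamily(
              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                  .setMaxVersions(1)
                  .build());
        }
        admin.createTable(tdb.build()); // blocks until CreateTableProcedure completes
      }
    }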
2024-11-12T19:34:17,085 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=758f25fb434410405582dc106004e936, regionState=OPEN, openSeqNum=2, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:34:17,087 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-12T19:34:17,087 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; OpenRegionProcedure 758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 in 171 msec 2024-11-12T19:34:17,088 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=91, resume processing ppid=90 2024-11-12T19:34:17,088 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=90, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=758f25fb434410405582dc106004e936, ASSIGN in 326 msec 2024-11-12T19:34:17,088 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=90, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T19:34:17,088 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731440057088"}]},"ts":"1731440057088"} 2024-11-12T19:34:17,089 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-12T19:34:17,128 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=90, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T19:34:17,129 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2420 sec 2024-11-12T19:34:17,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-11-12T19:34:17,996 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 90 completed 2024-11-12T19:34:17,997 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x474d5947 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4dacfd49 2024-11-12T19:34:18,008 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5271608e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:18,009 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:18,010 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36240, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:18,011 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T19:34:18,012 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55452, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-12T19:34:18,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-12T19:34:18,014 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.3 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T19:34:18,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-12T19:34:18,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742155_1331 (size=996) 2024-11-12T19:34:18,424 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-12T19:34:18,424 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-12T19:34:18,449 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-12T19:34:18,451 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=758f25fb434410405582dc106004e936, REOPEN/MOVE}] 2024-11-12T19:34:18,452 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=758f25fb434410405582dc106004e936, REOPEN/MOVE 2024-11-12T19:34:18,452 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=758f25fb434410405582dc106004e936, regionState=CLOSING, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:34:18,453 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-12T19:34:18,453 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; CloseRegionProcedure 758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493}] 2024-11-12T19:34:18,604 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:18,605 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close 758f25fb434410405582dc106004e936 2024-11-12T19:34:18,605 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-12T19:34:18,605 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing 758f25fb434410405582dc106004e936, disabling compactions & flushes 2024-11-12T19:34:18,605 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:18,605 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:18,605 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. after waiting 0 ms 2024-11-12T19:34:18,605 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:18,608 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-12T19:34:18,609 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:18,609 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:18,609 WARN [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionServer(3786): Not adding moved region record: 758f25fb434410405582dc106004e936 to self. 2024-11-12T19:34:18,610 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed 758f25fb434410405582dc106004e936 2024-11-12T19:34:18,610 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=758f25fb434410405582dc106004e936, regionState=CLOSED 2024-11-12T19:34:18,612 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-12T19:34:18,612 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseRegionProcedure 758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 in 158 msec 2024-11-12T19:34:18,613 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=758f25fb434410405582dc106004e936, REOPEN/MOVE; state=CLOSED, location=81d69e608036,33067,1731439956493; forceNewPlan=false, retain=true 2024-11-12T19:34:18,763 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=758f25fb434410405582dc106004e936, regionState=OPENING, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:34:18,764 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=95, state=RUNNABLE; OpenRegionProcedure 758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493}] 2024-11-12T19:34:18,915 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:18,918 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:18,918 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7285): Opening region: {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} 2024-11-12T19:34:18,919 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 758f25fb434410405582dc106004e936 2024-11-12T19:34:18,919 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:34:18,919 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7327): checking encryption for 758f25fb434410405582dc106004e936 2024-11-12T19:34:18,919 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7330): checking classloading for 758f25fb434410405582dc106004e936 2024-11-12T19:34:18,920 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 758f25fb434410405582dc106004e936 2024-11-12T19:34:18,921 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:34:18,921 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 758f25fb434410405582dc106004e936 columnFamilyName A 2024-11-12T19:34:18,922 DEBUG [StoreOpener-758f25fb434410405582dc106004e936-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:18,922 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] regionserver.HStore(327): Store=758f25fb434410405582dc106004e936/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:34:18,923 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 758f25fb434410405582dc106004e936 2024-11-12T19:34:18,923 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:34:18,924 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 758f25fb434410405582dc106004e936 columnFamilyName B 2024-11-12T19:34:18,924 DEBUG [StoreOpener-758f25fb434410405582dc106004e936-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:18,924 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] regionserver.HStore(327): Store=758f25fb434410405582dc106004e936/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:34:18,924 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 758f25fb434410405582dc106004e936 2024-11-12T19:34:18,925 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:34:18,925 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 758f25fb434410405582dc106004e936 columnFamilyName C 2024-11-12T19:34:18,925 DEBUG [StoreOpener-758f25fb434410405582dc106004e936-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:18,926 INFO [StoreOpener-758f25fb434410405582dc106004e936-1 {}] regionserver.HStore(327): Store=758f25fb434410405582dc106004e936/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:34:18,926 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:18,927 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936 2024-11-12T19:34:18,928 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936 2024-11-12T19:34:18,930 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-12T19:34:18,931 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1085): writing seq id for 758f25fb434410405582dc106004e936 2024-11-12T19:34:18,934 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1102): Opened 758f25fb434410405582dc106004e936; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70294534, jitterRate=0.04747018218040466}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T19:34:18,934 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1001): Region open journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:18,935 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., pid=97, masterSystemTime=1731440058915 2024-11-12T19:34:18,937 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:18,937 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
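The region close and reopen above is driven by ModifyTableProcedure (pid=93), which rewrites the table descriptor so that family A becomes a MOB family with MOB_THRESHOLD => '4'. A hedged sketch of issuing such a modification through the Admin API, assuming admin is an already-connected handle for the same cluster (illustrative only; the test's own code path may differ):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobSketch {
      // Turns family 'A' of the existing table into a MOB family with a 4-byte threshold.
      static void enableMobOnA(Admin admin) throws IOException {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        TableDescriptor current = admin.getDescriptor(tn);
        ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
        ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
            .setMobEnabled(true)   // IS_MOB => 'true'
            .setMobThreshold(4L)   // MOB_THRESHOLD => '4'
            .build();
        TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
            .modifyColumnFamily(mobA)
            .build();
        admin.modifyTable(modified); // triggers ModifyTableProcedure and a region reopen
      }
    }

As the log shows, the modification reopens the region in place (REOPEN/MOVE with retain=true), so the region stays on 81d69e608036,33067 and simply advances its openSeqNum.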
2024-11-12T19:34:18,937 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=758f25fb434410405582dc106004e936, regionState=OPEN, openSeqNum=5, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:34:18,939 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=95 2024-11-12T19:34:18,939 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=95, state=SUCCESS; OpenRegionProcedure 758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 in 174 msec 2024-11-12T19:34:18,940 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-11-12T19:34:18,941 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=758f25fb434410405582dc106004e936, REOPEN/MOVE in 488 msec 2024-11-12T19:34:18,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-12T19:34:18,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 492 msec 2024-11-12T19:34:18,944 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 929 msec 2024-11-12T19:34:18,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-12T19:34:18,946 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2953086d to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@33feebb 2024-11-12T19:34:19,011 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a139b42, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:19,011 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2076b3ad to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c40db2e 2024-11-12T19:34:19,032 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1acf826f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:19,033 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x20c5edec to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a86cb71 2024-11-12T19:34:19,045 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cbce2b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:19,046 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x789089aa to 
127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3401188a 2024-11-12T19:34:19,057 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4fd3f5fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:19,058 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x699c96a7 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@55650656 2024-11-12T19:34:19,070 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c97513, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:19,071 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0c80a40c to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@42af2962 2024-11-12T19:34:19,082 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4831febd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:19,083 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62cf69c5 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5910b8c7 2024-11-12T19:34:19,090 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e93614e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:19,091 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c35c7c4 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3f9a05 2024-11-12T19:34:19,099 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@523025d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:19,100 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5a259e93 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@26b6d860 2024-11-12T19:34:19,113 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b9a1701, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:19,114 DEBUG [Time-limited test {}] 
zookeeper.ReadOnlyZKClient(149): Connect 0x06179765 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@16722a1f 2024-11-12T19:34:19,125 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d3b05cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:19,130 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:34:19,130 DEBUG [hconnection-0x22a0fc30-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:19,131 DEBUG [hconnection-0x57033f81-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:19,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees 2024-11-12T19:34:19,132 DEBUG [hconnection-0x4d2b2d1b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:19,132 DEBUG [hconnection-0x3779370a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:19,132 DEBUG [hconnection-0x3c1bd92d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:19,133 DEBUG [hconnection-0x3bcc6fa7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:19,133 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36266, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:19,133 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36252, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:19,133 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:34:19,133 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36278, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:19,133 DEBUG [hconnection-0xb99095c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:19,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-12T19:34:19,134 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 
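The entries above show a client-requested flush of TestAcidGuarantees reaching the master ("Client=jenkins//172.17.0.3 flush TestAcidGuarantees"), being stored as FlushTableProcedure pid=98, and the master then logging the client's "Checking to see if procedure is done pid=98" polls. A minimal sketch of driving the same flush through the public Admin API follows; the ZooKeeper endpoint and table name are taken from this log, while the class name and explicit configuration keys are illustrative (a real deployment would normally rely on hbase-site.xml).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ZooKeeper endpoint as seen in this log (127.0.0.1:60358); adjust for a real cluster.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "60358");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Admin.flush is the client call behind the "flush TestAcidGuarantees" request the
      // master logs above; the master runs it as a FlushTableProcedure (pid=98 here) with
      // per-region FlushRegionProcedure children, and the client waits for completion.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}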
2024-11-12T19:34:19,134 DEBUG [hconnection-0x1f5523b2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:19,134 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:34:19,136 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36302, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:19,136 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36326, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:19,136 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36286, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:19,136 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36312, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:19,136 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36332, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:19,139 DEBUG [hconnection-0x1d6bb5cc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:19,139 DEBUG [hconnection-0x7c513998-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:19,140 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36334, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:19,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 758f25fb434410405582dc106004e936 2024-11-12T19:34:19,141 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 758f25fb434410405582dc106004e936 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-12T19:34:19,141 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36350, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:19,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=A 2024-11-12T19:34:19,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:19,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=B 2024-11-12T19:34:19,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:19,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=C 2024-11-12T19:34:19,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:19,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440119170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440119170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440119170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440119171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440119171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,182 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411120401cda48df246aa9022e0aa97d16a18_758f25fb434410405582dc106004e936 is 50, key is test_row_1/A:col10/1731440059140/Put/seqid=0 2024-11-12T19:34:19,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742156_1332 (size=9714) 2024-11-12T19:34:19,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-12T19:34:19,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440119273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440119273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440119273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440119273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440119273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,286 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:19,286 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-12T19:34:19,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:19,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:19,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:19,286 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:19,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:19,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:19,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-12T19:34:19,438 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:19,439 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-12T19:34:19,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:19,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:19,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:19,439 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:19,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:19,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:19,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440119475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440119475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440119475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440119476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440119477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,591 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:19,591 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-12T19:34:19,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:19,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:19,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:19,592 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:19,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:19,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:19,600 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:19,603 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411120401cda48df246aa9022e0aa97d16a18_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411120401cda48df246aa9022e0aa97d16a18_758f25fb434410405582dc106004e936 2024-11-12T19:34:19,604 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/bf47fea3631a4e369c29e92e0c768738, store: [table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:19,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/bf47fea3631a4e369c29e92e0c768738 is 175, key is test_row_1/A:col10/1731440059140/Put/seqid=0 2024-11-12T19:34:19,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742157_1333 (size=22361) 2024-11-12T19:34:19,628 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/bf47fea3631a4e369c29e92e0c768738 2024-11-12T19:34:19,661 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/92e2731190e04c2ab796458b59436c73 is 50, key is test_row_1/B:col10/1731440059140/Put/seqid=0 2024-11-12T19:34:19,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742158_1334 (size=9657) 2024-11-12T19:34:19,699 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/92e2731190e04c2ab796458b59436c73 2024-11-12T19:34:19,721 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/d477e747894e4ed1bc7e0bfeaabe2fac is 50, key is test_row_1/C:col10/1731440059140/Put/seqid=0 2024-11-12T19:34:19,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742159_1335 (size=9657) 2024-11-12T19:34:19,725 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/d477e747894e4ed1bc7e0bfeaabe2fac 2024-11-12T19:34:19,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-12T19:34:19,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/bf47fea3631a4e369c29e92e0c768738 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/bf47fea3631a4e369c29e92e0c768738 2024-11-12T19:34:19,742 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/bf47fea3631a4e369c29e92e0c768738, entries=100, sequenceid=15, filesize=21.8 K 2024-11-12T19:34:19,744 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:19,744 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-12T19:34:19,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/92e2731190e04c2ab796458b59436c73 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/92e2731190e04c2ab796458b59436c73 2024-11-12T19:34:19,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:19,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:19,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:19,744 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:19,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:19,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:19,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/92e2731190e04c2ab796458b59436c73, entries=100, sequenceid=15, filesize=9.4 K 2024-11-12T19:34:19,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/d477e747894e4ed1bc7e0bfeaabe2fac as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/d477e747894e4ed1bc7e0bfeaabe2fac 2024-11-12T19:34:19,752 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/d477e747894e4ed1bc7e0bfeaabe2fac, entries=100, sequenceid=15, filesize=9.4 K 2024-11-12T19:34:19,752 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 758f25fb434410405582dc106004e936 in 611ms, sequenceid=15, compaction requested=false 2024-11-12T19:34:19,752 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-12T19:34:19,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:19,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 758f25fb434410405582dc106004e936 2024-11-12T19:34:19,782 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 758f25fb434410405582dc106004e936 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-12T19:34:19,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=A 2024-11-12T19:34:19,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:19,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=B 2024-11-12T19:34:19,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:19,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=C 2024-11-12T19:34:19,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:19,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440119784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440119785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440119785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440119786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440119787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,791 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111212fdb01409c644009daa8843123b0c61_758f25fb434410405582dc106004e936 is 50, key is test_row_0/A:col10/1731440059782/Put/seqid=0 2024-11-12T19:34:19,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742160_1336 (size=17034) 2024-11-12T19:34:19,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440119888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440119889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440119890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440119891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:19,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440119892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:19,896 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:19,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-12T19:34:19,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:19,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:19,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:19,896 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:19,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:19,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:20,048 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:20,049 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-12T19:34:20,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:20,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:20,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:20,049 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:20,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:20,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:20,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:20,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440120091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:20,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:20,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440120092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:20,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:20,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440120092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:20,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:20,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440120097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:20,099 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:20,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440120098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:20,165 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-12T19:34:20,195 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:20,199 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111212fdb01409c644009daa8843123b0c61_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111212fdb01409c644009daa8843123b0c61_758f25fb434410405582dc106004e936 2024-11-12T19:34:20,200 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/ebc97ad9384249ea8bdb2952435adb5c, store: [table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:20,201 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:20,201 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/ebc97ad9384249ea8bdb2952435adb5c is 175, key is 
test_row_0/A:col10/1731440059782/Put/seqid=0 2024-11-12T19:34:20,201 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-12T19:34:20,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:20,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:20,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:20,201 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:20,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:20,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:20,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742161_1337 (size=48139) 2024-11-12T19:34:20,216 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=43, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/ebc97ad9384249ea8bdb2952435adb5c 2024-11-12T19:34:20,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-12T19:34:20,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/111f4ff521b34517bc554c0726866a8b is 50, key is test_row_0/B:col10/1731440059782/Put/seqid=0 2024-11-12T19:34:20,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742162_1338 (size=12001) 2024-11-12T19:34:20,354 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:20,359 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-12T19:34:20,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:20,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:20,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:20,360 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
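[editor's note] The DefaultMobStoreFlusher entry above indicates that column family A is MOB-enabled, so its flush produces a MOB file (later renamed under mobdir, as seen further down) alongside the regular store file in .tmp/A. A minimal sketch of how such a family is typically declared follows; the threshold value is an illustrative assumption, not read from the test's table descriptor.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class MobFamilySketch {
  static TableDescriptor buildDescriptor() {
    ColumnFamilyDescriptor familyA = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)           // routes flushes through DefaultMobStoreFlusher
        .setMobThreshold(4L * 1024)    // assumed threshold; cells above it become MOB references
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(familyA)
        .build();
  }
}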
2024-11-12T19:34:20,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:20,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:20,394 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:20,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440120394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:20,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:20,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440120399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:20,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:20,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:20,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440120403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:20,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440120403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:20,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:20,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440120407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:20,514 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:20,515 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-12T19:34:20,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:20,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:20,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:20,515 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
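[editor's note] The RegionTooBusyException warnings above come from HRegion.checkResources rejecting mutations while the region's memstore is over its blocking limit (512.0 K here) and a flush is still in flight. A minimal writer-side sketch that backs off and retries on this condition follows; the table, row, and column names are illustrative assumptions, and in practice the HBase client's own retry layer may surface the exception wrapped rather than directly.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BackoffPutSketch {
  static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    long sleepMs = 100;
    for (int attempt = 0; attempt < 10; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException busy) {
        // Memstore above the blocking limit; give the in-flight flush time to finish.
        Thread.sleep(sleepMs);
        sleepMs = Math.min(sleepMs * 2, 5_000);
      }
    }
    throw new IOException("region stayed too busy after repeated backoff");
  }

  static Put examplePut() {
    // Assumed row/family/qualifier layout matching the test's naming style.
    return new Put(Bytes.toBytes("test_row_0"))
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
  }
}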
2024-11-12T19:34:20,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:20,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:20,667 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:20,668 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-12T19:34:20,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:20,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:20,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:20,668 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:20,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:20,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:20,702 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/111f4ff521b34517bc554c0726866a8b 2024-11-12T19:34:20,736 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/c2f347d1c60b43fbaf18174938e1cae9 is 50, key is test_row_0/C:col10/1731440059782/Put/seqid=0 2024-11-12T19:34:20,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742163_1339 (size=12001) 2024-11-12T19:34:20,820 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:20,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-12T19:34:20,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:20,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:20,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:20,821 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:20,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:20,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:20,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:20,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440120902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:20,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:20,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440120907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:20,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:20,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440120908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:20,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:20,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440120910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:20,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:20,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440120912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:20,975 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:20,975 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-12T19:34:20,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:20,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:20,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:20,976 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
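[editor's note] The repeated "Over memstore limit=512.0 K" rejections suggest the test harness runs with a deliberately tiny flush size, since the blocking threshold is roughly the memstore flush size multiplied by the block multiplier. A hedged configuration sketch of those two knobs follows; the values are assumptions chosen to reproduce a 512 K blocking limit, not read from the test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class MemstoreLimitSketch {
  static Configuration tinyMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values: flush at 128 K, block new writes at 4x that (512 K),
    // matching the limit reported by HRegion.checkResources in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}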
2024-11-12T19:34:20,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:20,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:21,127 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:21,128 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-12T19:34:21,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:21,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:21,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:21,131 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:21,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:21,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:21,179 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/c2f347d1c60b43fbaf18174938e1cae9 2024-11-12T19:34:21,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/ebc97ad9384249ea8bdb2952435adb5c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ebc97ad9384249ea8bdb2952435adb5c 2024-11-12T19:34:21,197 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ebc97ad9384249ea8bdb2952435adb5c, entries=250, sequenceid=43, filesize=47.0 K 2024-11-12T19:34:21,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/111f4ff521b34517bc554c0726866a8b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/111f4ff521b34517bc554c0726866a8b 2024-11-12T19:34:21,205 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/111f4ff521b34517bc554c0726866a8b, entries=150, sequenceid=43, filesize=11.7 K 2024-11-12T19:34:21,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/c2f347d1c60b43fbaf18174938e1cae9 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/c2f347d1c60b43fbaf18174938e1cae9 2024-11-12T19:34:21,232 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/c2f347d1c60b43fbaf18174938e1cae9, entries=150, sequenceid=43, filesize=11.7 K 2024-11-12T19:34:21,233 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 758f25fb434410405582dc106004e936 in 1452ms, sequenceid=43, compaction requested=false 2024-11-12T19:34:21,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:21,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-12T19:34:21,287 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:21,288 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-12T19:34:21,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:21,288 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2837): Flushing 758f25fb434410405582dc106004e936 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-12T19:34:21,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=A 2024-11-12T19:34:21,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:21,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=B 2024-11-12T19:34:21,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:21,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=C 2024-11-12T19:34:21,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:21,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112f5dd831204ae4550bc9613782a2ad141_758f25fb434410405582dc106004e936 is 50, key is test_row_0/A:col10/1731440059786/Put/seqid=0 2024-11-12T19:34:21,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742164_1340 (size=12154) 2024-11-12T19:34:21,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:21,756 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112f5dd831204ae4550bc9613782a2ad141_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112f5dd831204ae4550bc9613782a2ad141_758f25fb434410405582dc106004e936 2024-11-12T19:34:21,761 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/78be46fc33f140a7a82ec894fffe6af5, store: [table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:21,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/78be46fc33f140a7a82ec894fffe6af5 is 175, key is test_row_0/A:col10/1731440059786/Put/seqid=0 2024-11-12T19:34:21,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742165_1341 (size=30955) 2024-11-12T19:34:21,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:21,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 758f25fb434410405582dc106004e936 2024-11-12T19:34:21,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:21,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440121965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:21,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:21,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:21,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440121969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:21,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440121971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:21,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:21,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440121974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:21,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:21,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440121974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:22,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:22,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440122077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:22,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:22,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440122085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:22,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:22,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440122085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:22,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:22,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440122085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:22,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:22,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440122093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:22,186 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=51, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/78be46fc33f140a7a82ec894fffe6af5 2024-11-12T19:34:22,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/1eb425dd9a77489cbee7e5c9792d11ea is 50, key is test_row_0/B:col10/1731440059786/Put/seqid=0 2024-11-12T19:34:22,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742166_1342 (size=12001) 2024-11-12T19:34:22,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:22,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440122289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:22,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:22,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440122295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:22,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:22,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440122295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:22,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:22,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440122297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:22,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:22,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440122303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:22,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:22,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440122605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:22,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:22,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440122605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:22,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:22,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440122605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:22,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:22,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440122619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:22,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:22,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440122631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:22,653 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/1eb425dd9a77489cbee7e5c9792d11ea 2024-11-12T19:34:22,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/9c1ae73ea6d647899e3b215616fd81d8 is 50, key is test_row_0/C:col10/1731440059786/Put/seqid=0 2024-11-12T19:34:22,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742167_1343 (size=12001) 2024-11-12T19:34:22,711 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/9c1ae73ea6d647899e3b215616fd81d8 2024-11-12T19:34:22,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/78be46fc33f140a7a82ec894fffe6af5 as 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/78be46fc33f140a7a82ec894fffe6af5 2024-11-12T19:34:22,756 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/78be46fc33f140a7a82ec894fffe6af5, entries=150, sequenceid=51, filesize=30.2 K 2024-11-12T19:34:22,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/1eb425dd9a77489cbee7e5c9792d11ea as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/1eb425dd9a77489cbee7e5c9792d11ea 2024-11-12T19:34:22,763 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/1eb425dd9a77489cbee7e5c9792d11ea, entries=150, sequenceid=51, filesize=11.7 K 2024-11-12T19:34:22,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/9c1ae73ea6d647899e3b215616fd81d8 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/9c1ae73ea6d647899e3b215616fd81d8 2024-11-12T19:34:22,775 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/9c1ae73ea6d647899e3b215616fd81d8, entries=150, sequenceid=51, filesize=11.7 K 2024-11-12T19:34:22,779 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=174.43 KB/178620 for 758f25fb434410405582dc106004e936 in 1491ms, sequenceid=51, compaction requested=true 2024-11-12T19:34:22,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2538): Flush status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:22,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:22,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=99 2024-11-12T19:34:22,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=99 2024-11-12T19:34:22,799 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-11-12T19:34:22,799 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.6520 sec 2024-11-12T19:34:22,810 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees in 3.6750 sec 2024-11-12T19:34:23,114 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 758f25fb434410405582dc106004e936 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-11-12T19:34:23,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=A 2024-11-12T19:34:23,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:23,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=B 2024-11-12T19:34:23,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:23,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=C 2024-11-12T19:34:23,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:23,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 758f25fb434410405582dc106004e936 2024-11-12T19:34:23,137 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:23,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440123126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:23,138 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:23,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440123126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:23,138 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:23,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440123130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:23,139 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:23,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440123135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:23,153 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411120287234240d44ccfba76ab8954665d37_758f25fb434410405582dc106004e936 is 50, key is test_row_0/A:col10/1731440061970/Put/seqid=0 2024-11-12T19:34:23,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:23,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440123163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:23,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742168_1344 (size=12154) 2024-11-12T19:34:23,186 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:23,203 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411120287234240d44ccfba76ab8954665d37_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411120287234240d44ccfba76ab8954665d37_758f25fb434410405582dc106004e936 2024-11-12T19:34:23,210 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/708ce42716c047709ecffcab670a9ef3, store: [table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:23,211 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/708ce42716c047709ecffcab670a9ef3 is 175, key is test_row_0/A:col10/1731440061970/Put/seqid=0 2024-11-12T19:34:23,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742169_1345 (size=30955) 2024-11-12T19:34:23,236 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=81, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/708ce42716c047709ecffcab670a9ef3 2024-11-12T19:34:23,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-12T19:34:23,241 INFO [Thread-1518 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 98 completed 
2024-11-12T19:34:23,244 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:34:23,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:23,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440123242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:23,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:23,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440123243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:23,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees 2024-11-12T19:34:23,247 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:34:23,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-12T19:34:23,247 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:34:23,247 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:34:23,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:23,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440123244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:23,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/ce523650f457449ab17d8c8de60da30d is 50, key is test_row_0/B:col10/1731440061970/Put/seqid=0 2024-11-12T19:34:23,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742170_1346 (size=12001) 2024-11-12T19:34:23,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-12T19:34:23,403 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:23,407 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-12T19:34:23,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:23,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:23,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:23,407 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:23,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:23,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:23,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:23,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440123448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:23,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:23,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440123448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:23,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:23,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440123453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:23,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-12T19:34:23,562 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:23,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-12T19:34:23,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:23,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:23,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:23,568 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:23,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:23,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:23,699 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/ce523650f457449ab17d8c8de60da30d 2024-11-12T19:34:23,705 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/788d9415f58b4c439b7b5d6f2dfecc82 is 50, key is test_row_0/C:col10/1731440061970/Put/seqid=0 2024-11-12T19:34:23,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742171_1347 (size=12001) 2024-11-12T19:34:23,737 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:23,737 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-12T19:34:23,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:23,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:23,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:23,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:23,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:23,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:23,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:23,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440123751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:23,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:23,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440123752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:23,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:23,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440123765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:23,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-12T19:34:23,889 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:23,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-12T19:34:23,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:23,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:23,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:23,891 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:23,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:23,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:24,043 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:24,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-12T19:34:24,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:24,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:24,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:24,043 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:24,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:24,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:24,116 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/788d9415f58b4c439b7b5d6f2dfecc82 2024-11-12T19:34:24,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/708ce42716c047709ecffcab670a9ef3 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/708ce42716c047709ecffcab670a9ef3 2024-11-12T19:34:24,123 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/708ce42716c047709ecffcab670a9ef3, entries=150, sequenceid=81, filesize=30.2 K 2024-11-12T19:34:24,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/ce523650f457449ab17d8c8de60da30d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/ce523650f457449ab17d8c8de60da30d 2024-11-12T19:34:24,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/ce523650f457449ab17d8c8de60da30d, entries=150, sequenceid=81, filesize=11.7 K 2024-11-12T19:34:24,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/788d9415f58b4c439b7b5d6f2dfecc82 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/788d9415f58b4c439b7b5d6f2dfecc82 2024-11-12T19:34:24,129 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/788d9415f58b4c439b7b5d6f2dfecc82, entries=150, sequenceid=81, filesize=11.7 K 2024-11-12T19:34:24,130 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,130 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 758f25fb434410405582dc106004e936 in 1016ms, sequenceid=81, compaction requested=true 2024-11-12T19:34:24,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:24,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 758f25fb434410405582dc106004e936:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:34:24,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:24,130 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:34:24,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 758f25fb434410405582dc106004e936:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:34:24,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:24,130 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:34:24,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 758f25fb434410405582dc106004e936:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:34:24,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:24,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,131 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 132410 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:34:24,131 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 
after considering 3 permutations with 3 in ratio 2024-11-12T19:34:24,131 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 758f25fb434410405582dc106004e936/B is initiating minor compaction (all files) 2024-11-12T19:34:24,131 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 758f25fb434410405582dc106004e936/A is initiating minor compaction (all files) 2024-11-12T19:34:24,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,132 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 758f25fb434410405582dc106004e936/A in TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:24,132 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 758f25fb434410405582dc106004e936/B in TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:24,132 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/bf47fea3631a4e369c29e92e0c768738, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ebc97ad9384249ea8bdb2952435adb5c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/78be46fc33f140a7a82ec894fffe6af5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/708ce42716c047709ecffcab670a9ef3] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp, totalSize=129.3 K 2024-11-12T19:34:24,132 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/92e2731190e04c2ab796458b59436c73, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/111f4ff521b34517bc554c0726866a8b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/1eb425dd9a77489cbee7e5c9792d11ea, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/ce523650f457449ab17d8c8de60da30d] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp, totalSize=44.6 K 2024-11-12T19:34:24,132 INFO [RS:0;81d69e608036:33067-shortCompactions-0 
{}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:24,132 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/bf47fea3631a4e369c29e92e0c768738, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ebc97ad9384249ea8bdb2952435adb5c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/78be46fc33f140a7a82ec894fffe6af5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/708ce42716c047709ecffcab670a9ef3] 2024-11-12T19:34:24,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,132 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 92e2731190e04c2ab796458b59436c73, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1731440059140 2024-11-12T19:34:24,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,132 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf47fea3631a4e369c29e92e0c768738, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1731440059140 2024-11-12T19:34:24,133 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 111f4ff521b34517bc554c0726866a8b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731440059169 2024-11-12T19:34:24,133 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting ebc97ad9384249ea8bdb2952435adb5c, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731440059169 2024-11-12T19:34:24,133 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 1eb425dd9a77489cbee7e5c9792d11ea, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1731440059784 2024-11-12T19:34:24,133 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78be46fc33f140a7a82ec894fffe6af5, keycount=150, bloomtype=ROW, size=30.2 
K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1731440059784 2024-11-12T19:34:24,133 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 708ce42716c047709ecffcab670a9ef3, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1731440061970 2024-11-12T19:34:24,133 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting ce523650f457449ab17d8c8de60da30d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1731440061970 2024-11-12T19:34:24,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,143 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:24,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,149 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 758f25fb434410405582dc106004e936#B#compaction#294 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:24,149 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/a17fe2454a264388b6e78929f782d608 is 50, key is test_row_0/B:col10/1731440061970/Put/seqid=0 2024-11-12T19:34:24,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,155 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411123b103995f2534c91b95a9e3ebfdec51b_758f25fb434410405582dc106004e936 store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:24,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,164 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411123b103995f2534c91b95a9e3ebfdec51b_758f25fb434410405582dc106004e936, store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:24,164 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411123b103995f2534c91b95a9e3ebfdec51b_758f25fb434410405582dc106004e936 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:24,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,167 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742172_1348 (size=12139) 2024-11-12T19:34:24,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742173_1349 (size=4469) 2024-11-12T19:34:24,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,174 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 758f25fb434410405582dc106004e936#A#compaction#295 average throughput is 0.79 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:24,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,175 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/adfef077ac7a4626a71982a735c1f9ef is 175, key is test_row_0/A:col10/1731440061970/Put/seqid=0 2024-11-12T19:34:24,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,180 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/a17fe2454a264388b6e78929f782d608 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/a17fe2454a264388b6e78929f782d608 2024-11-12T19:34:24,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,188 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 758f25fb434410405582dc106004e936/B of 758f25fb434410405582dc106004e936 into a17fe2454a264388b6e78929f782d608(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:34:24,188 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:24,188 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., storeName=758f25fb434410405582dc106004e936/B, priority=12, startTime=1731440064130; duration=0sec 2024-11-12T19:34:24,188 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:24,188 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 758f25fb434410405582dc106004e936:B 2024-11-12T19:34:24,188 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:34:24,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,190 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:34:24,190 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 758f25fb434410405582dc106004e936/C is initiating minor compaction (all files) 2024-11-12T19:34:24,190 INFO 
[RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 758f25fb434410405582dc106004e936/C in TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:24,190 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/d477e747894e4ed1bc7e0bfeaabe2fac, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/c2f347d1c60b43fbaf18174938e1cae9, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/9c1ae73ea6d647899e3b215616fd81d8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/788d9415f58b4c439b7b5d6f2dfecc82] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp, totalSize=44.6 K 2024-11-12T19:34:24,191 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting d477e747894e4ed1bc7e0bfeaabe2fac, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1731440059140 2024-11-12T19:34:24,191 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting c2f347d1c60b43fbaf18174938e1cae9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731440059169 2024-11-12T19:34:24,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,191 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c1ae73ea6d647899e3b215616fd81d8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1731440059784 2024-11-12T19:34:24,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742174_1350 (size=31093) 2024-11-12T19:34:24,192 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 788d9415f58b4c439b7b5d6f2dfecc82, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1731440061970 2024-11-12T19:34:24,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,194 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,198 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:24,199 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/adfef077ac7a4626a71982a735c1f9ef as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/adfef077ac7a4626a71982a735c1f9ef 2024-11-12T19:34:24,199 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-12T19:34:24,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
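At this point the re-dispatched pid=101 finally proceeds: the region is no longer mid-flush, so all three column families (A, B, C) backed by CompactingMemStore are flushed below. The rows involved (for example test_row_0 with qualifier col10 across families A, B and C) are written by multi-family Puts, which HBase applies atomically per row, and that per-row atomicity across families is what the test verifies. A minimal hedged sketch of such a write follows, with placeholder configuration and payload values rather than the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiFamilyPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // placeholder client config
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // One Put spanning families A, B and C: HBase applies it atomically within the
      // row, so a reader should never observe family A from one write mixed with
      // family B or C from another.
      byte[] row = Bytes.toBytes("test_row_0");
      byte[] qualifier = Bytes.toBytes("col10");
      byte[] value = Bytes.toBytes(42L); // placeholder payload, not from the test
      Put put = new Put(row);
      put.addColumn(Bytes.toBytes("A"), qualifier, value);
      put.addColumn(Bytes.toBytes("B"), qualifier, value);
      put.addColumn(Bytes.toBytes("C"), qualifier, value);
      table.put(put);
    }
  }
}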
2024-11-12T19:34:24,199 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2837): Flushing 758f25fb434410405582dc106004e936 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-12T19:34:24,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=A 2024-11-12T19:34:24,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:24,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=B 2024-11-12T19:34:24,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:24,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=C 2024-11-12T19:34:24,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:24,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,215 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 758f25fb434410405582dc106004e936#C#compaction#296 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:24,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,216 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/0da3c2a0ea5d477f89d8a860b69d4168 is 50, key is test_row_0/C:col10/1731440061970/Put/seqid=0 2024-11-12T19:34:24,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,219 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 758f25fb434410405582dc106004e936/A of 758f25fb434410405582dc106004e936 into adfef077ac7a4626a71982a735c1f9ef(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
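The compaction selections above ("Exploring compaction algorithm has selected 4 files ... after considering 3 permutations with 3 in ratio") are size-ratio based: a candidate set is only accepted if no single file dwarfs the rest. Below is a simplified standalone sketch of that ratio check, applied to the rounded B-family store-file sizes from this log and an assumed default ratio of 1.2; it is an illustration of the rule, not HBase's ExploringCompactionPolicy source.

// Simplified sketch of a "files in ratio" size check as used by exploring-style
// compaction selection; sizes and the 1.2 ratio are taken as assumptions.
public class CompactionRatioSketch {

  // A candidate set is "in ratio" if no file is larger than
  // ratio * (combined size of the other files in the set).
  static boolean filesInRatio(double[] sizesKb, double ratio) {
    double total = 0;
    for (double s : sizesKb) {
      total += s;
    }
    for (double s : sizesKb) {
      if (s > ratio * (total - s)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Rounded sizes (KB) of the four B-family store files selected in the log above.
    double[] bFamily = {9.4, 11.7, 11.7, 11.7};
    // Prints true: all four files fit the ratio, so they compact together as one set.
    System.out.println(filesInRatio(bFamily, 1.2));
  }
}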
2024-11-12T19:34:24,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,219 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:24,219 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., storeName=758f25fb434410405582dc106004e936/A, priority=12, startTime=1731440064130; duration=0sec 2024-11-12T19:34:24,219 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:24,219 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 758f25fb434410405582dc106004e936:A 2024-11-12T19:34:24,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742175_1351 (size=12139) 2024-11-12T19:34:24,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411126491fe25944f407a9d34912580e2e00e_758f25fb434410405582dc106004e936 is 50, key is test_row_0/A:col10/1731440063123/Put/seqid=0 2024-11-12T19:34:24,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742176_1352 (size=9714) 
2024-11-12T19:34:24,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,305 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411126491fe25944f407a9d34912580e2e00e_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411126491fe25944f407a9d34912580e2e00e_758f25fb434410405582dc106004e936 2024-11-12T19:34:24,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/d8aff2c4a87a45a6b126c9ceb616d202, store: [table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:24,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/d8aff2c4a87a45a6b126c9ceb616d202 is 175, key is test_row_0/A:col10/1731440063123/Put/seqid=0 2024-11-12T19:34:24,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 758f25fb434410405582dc106004e936 2024-11-12T19:34:24,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:24,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742177_1353 (size=22361) 2024-11-12T19:34:24,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-12T19:34:24,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:24,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440124398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:24,408 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:24,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440124401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:24,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:24,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440124402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:24,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:24,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440124403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:24,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:24,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440124404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:24,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:24,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440124505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:24,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:24,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440124512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:24,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:24,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440124512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:24,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:24,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440124512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:24,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:24,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440124515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:24,659 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/0da3c2a0ea5d477f89d8a860b69d4168 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/0da3c2a0ea5d477f89d8a860b69d4168 2024-11-12T19:34:24,665 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 758f25fb434410405582dc106004e936/C of 758f25fb434410405582dc106004e936 into 0da3c2a0ea5d477f89d8a860b69d4168(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:34:24,665 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:24,665 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., storeName=758f25fb434410405582dc106004e936/C, priority=12, startTime=1731440064130; duration=0sec 2024-11-12T19:34:24,665 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:24,665 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 758f25fb434410405582dc106004e936:C 2024-11-12T19:34:24,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:24,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440124719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:24,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440124721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:24,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440124721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:24,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440124722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:24,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:24,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440124730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:24,745 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=88, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/d8aff2c4a87a45a6b126c9ceb616d202 2024-11-12T19:34:24,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/cc98873a3493454b9993521aa6c2e77b is 50, key is test_row_0/B:col10/1731440063123/Put/seqid=0 2024-11-12T19:34:24,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742178_1354 (size=9657) 2024-11-12T19:34:24,801 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/cc98873a3493454b9993521aa6c2e77b 2024-11-12T19:34:24,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/b8234d06264d41e7ac445c4d2e1420a5 is 50, key is test_row_0/C:col10/1731440063123/Put/seqid=0 2024-11-12T19:34:24,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742179_1355 (size=9657) 2024-11-12T19:34:24,888 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/b8234d06264d41e7ac445c4d2e1420a5 2024-11-12T19:34:24,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/d8aff2c4a87a45a6b126c9ceb616d202 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/d8aff2c4a87a45a6b126c9ceb616d202 2024-11-12T19:34:24,936 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/d8aff2c4a87a45a6b126c9ceb616d202, entries=100, sequenceid=88, filesize=21.8 K 2024-11-12T19:34:24,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/cc98873a3493454b9993521aa6c2e77b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/cc98873a3493454b9993521aa6c2e77b 2024-11-12T19:34:24,969 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/cc98873a3493454b9993521aa6c2e77b, entries=100, sequenceid=88, filesize=9.4 K 2024-11-12T19:34:24,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/b8234d06264d41e7ac445c4d2e1420a5 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/b8234d06264d41e7ac445c4d2e1420a5 2024-11-12T19:34:24,977 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/b8234d06264d41e7ac445c4d2e1420a5, entries=100, sequenceid=88, filesize=9.4 K 2024-11-12T19:34:24,978 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=181.14 KB/185490 for 758f25fb434410405582dc106004e936 in 778ms, sequenceid=88, compaction requested=false 2024-11-12T19:34:24,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2538): Flush status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:24,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:24,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-11-12T19:34:24,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=101 2024-11-12T19:34:25,006 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-11-12T19:34:25,007 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7390 sec 2024-11-12T19:34:25,015 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees in 1.7660 sec 2024-11-12T19:34:25,043 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 758f25fb434410405582dc106004e936 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-11-12T19:34:25,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 758f25fb434410405582dc106004e936 2024-11-12T19:34:25,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=A 2024-11-12T19:34:25,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:25,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=B 2024-11-12T19:34:25,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:25,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=C 2024-11-12T19:34:25,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:25,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:25,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440125043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:25,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:25,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440125044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:25,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:25,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440125051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:25,065 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411128a496a79a3cd41c88bedbcb79e15b553_758f25fb434410405582dc106004e936 is 50, key is test_row_0/A:col10/1731440065039/Put/seqid=0 2024-11-12T19:34:25,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:25,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440125060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:25,068 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:25,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440125057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:25,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742180_1356 (size=12154) 2024-11-12T19:34:25,096 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,100 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411128a496a79a3cd41c88bedbcb79e15b553_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411128a496a79a3cd41c88bedbcb79e15b553_758f25fb434410405582dc106004e936 2024-11-12T19:34:25,102 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/7fadfeb0e3284a0584eacd05cbffff12, store: [table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:25,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/7fadfeb0e3284a0584eacd05cbffff12 is 175, key is test_row_0/A:col10/1731440065039/Put/seqid=0 2024-11-12T19:34:25,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742181_1357 (size=30955) 2024-11-12T19:34:25,146 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=122, memsize=64.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/7fadfeb0e3284a0584eacd05cbffff12 2024-11-12T19:34:25,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:25,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440125160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:25,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/0f40d7ca193b4e50b2467da8f3ef9dfd is 50, key is test_row_0/B:col10/1731440065039/Put/seqid=0 2024-11-12T19:34:25,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:25,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440125170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:25,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:25,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440125171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:25,174 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:25,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440125174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:25,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742182_1358 (size=12001) 2024-11-12T19:34:25,192 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=122 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/0f40d7ca193b4e50b2467da8f3ef9dfd 2024-11-12T19:34:25,220 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/1b04518687ac4d06902afb2011197e85 is 50, key is test_row_0/C:col10/1731440065039/Put/seqid=0 2024-11-12T19:34:25,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742183_1359 (size=12001) 2024-11-12T19:34:25,259 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=122 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/1b04518687ac4d06902afb2011197e85 2024-11-12T19:34:25,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/7fadfeb0e3284a0584eacd05cbffff12 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/7fadfeb0e3284a0584eacd05cbffff12 2024-11-12T19:34:25,290 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/7fadfeb0e3284a0584eacd05cbffff12, 
entries=150, sequenceid=122, filesize=30.2 K 2024-11-12T19:34:25,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/0f40d7ca193b4e50b2467da8f3ef9dfd as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/0f40d7ca193b4e50b2467da8f3ef9dfd 2024-11-12T19:34:25,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,298 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/0f40d7ca193b4e50b2467da8f3ef9dfd, entries=150, sequenceid=122, filesize=11.7 K 2024-11-12T19:34:25,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/1b04518687ac4d06902afb2011197e85 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/1b04518687ac4d06902afb2011197e85 2024-11-12T19:34:25,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,306 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/1b04518687ac4d06902afb2011197e85, entries=150, sequenceid=122, filesize=11.7 K 2024-11-12T19:34:25,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~194.56 KB/199230, heapSize ~510.47 KB/522720, currentSize=13.42 KB/13740 for 758f25fb434410405582dc106004e936 in 270ms, sequenceid=122, compaction requested=true 2024-11-12T19:34:25,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:25,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 758f25fb434410405582dc106004e936:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:34:25,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:25,313 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:25,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 758f25fb434410405582dc106004e936:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:34:25,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:25,313 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:25,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 758f25fb434410405582dc106004e936:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:34:25,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:25,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,313 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,317 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,319 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33797 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:25,319 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84409 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:25,319 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 758f25fb434410405582dc106004e936/B is initiating minor compaction (all files) 2024-11-12T19:34:25,319 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 758f25fb434410405582dc106004e936/A is initiating minor compaction (all files) 2024-11-12T19:34:25,319 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 758f25fb434410405582dc106004e936/A in TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:25,320 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/adfef077ac7a4626a71982a735c1f9ef, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/d8aff2c4a87a45a6b126c9ceb616d202, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/7fadfeb0e3284a0584eacd05cbffff12] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp, totalSize=82.4 K 2024-11-12T19:34:25,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,320 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:25,320 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 758f25fb434410405582dc106004e936/B in TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:25,320 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/adfef077ac7a4626a71982a735c1f9ef, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/d8aff2c4a87a45a6b126c9ceb616d202, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/7fadfeb0e3284a0584eacd05cbffff12] 2024-11-12T19:34:25,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,320 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/a17fe2454a264388b6e78929f782d608, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/cc98873a3493454b9993521aa6c2e77b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/0f40d7ca193b4e50b2467da8f3ef9dfd] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp, totalSize=33.0 K 2024-11-12T19:34:25,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,321 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting adfef077ac7a4626a71982a735c1f9ef, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1731440061970 2024-11-12T19:34:25,321 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] 
compactions.Compactor(224): Compacting a17fe2454a264388b6e78929f782d608, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1731440061970 2024-11-12T19:34:25,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,322 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting cc98873a3493454b9993521aa6c2e77b, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1731440063123 2024-11-12T19:34:25,322 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8aff2c4a87a45a6b126c9ceb616d202, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1731440063123 2024-11-12T19:34:25,322 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f40d7ca193b4e50b2467da8f3ef9dfd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1731440064403 2024-11-12T19:34:25,322 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7fadfeb0e3284a0584eacd05cbffff12, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1731440064403 2024-11-12T19:34:25,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,327 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,332 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,335 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,341 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,351 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100
2024-11-12T19:34:25,351 INFO [Thread-1518 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 100 completed
2024-11-12T19:34:25,352 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees
2024-11-12T19:34:25,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees
2024-11-12T19:34:25,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,355 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-12T19:34:25,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102
2024-11-12T19:34:25,355 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-12T19:34:25,356 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-12T19:34:25,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
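The HMaster and ProcedureExecutor entries above record a client-requested flush of TestAcidGuarantees being stored as FlushTableProcedure pid=102 and fanned out into a FlushRegionProcedure child (pid=103), while the HBaseAdmin$TableFuture entry reports the earlier flush (procId 100) completing on the client side. For orientation only, here is a minimal sketch of how such a flush is typically issued through the standard HBase 2.x client API; the class name and configuration setup are illustrative assumptions and are not taken from this test run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative flush request against a running cluster (assumes an hbase-site.xml with the
// cluster's connection settings is on the classpath; not part of the test harness itself).
public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // The master stores this request as a FlushTableProcedure and creates
            // FlushRegionProcedure children (here, the single pid=103 child shown above).
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```

The client then waits for the procedure result, which is consistent with the repeated "Checking to see if procedure is done pid=..." polls answered by the master RPC handlers in the surrounding entries.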
2024-11-12T19:34:25,356 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 758f25fb434410405582dc106004e936#B#compaction#303 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-12T19:34:25,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,356 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/47c8597c43c3481abb2d058b9f71272c is 50, key is test_row_0/B:col10/1731440065039/Put/seqid=0
2024-11-12T19:34:25,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,358 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936]
2024-11-12T19:34:25,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,363 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024111242cb012ddb5a4526913d6d0cea16b90e_758f25fb434410405582dc106004e936 store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936]
2024-11-12T19:34:25,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742184_1360 (size=12241)
2024-11-12T19:34:25,365 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024111242cb012ddb5a4526913d6d0cea16b90e_758f25fb434410405582dc106004e936, store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936]
2024-11-12T19:34:25,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,365 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111242cb012ddb5a4526913d6d0cea16b90e_758f25fb434410405582dc106004e936 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936]
2024-11-12T19:34:25,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,365 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,373 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,377 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742185_1361 (size=4469)
2024-11-12T19:34:25,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,378 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 758f25fb434410405582dc106004e936#A#compaction#304 average throughput is 1.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-12T19:34:25,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,379 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/024893887e8243c59727d136638cff28 is 175, key is test_row_0/A:col10/1731440065039/Put/seqid=0
2024-11-12T19:34:25,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:25,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742186_1362 (size=31195) 2024-11-12T19:34:25,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
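The DEBUG lines above repeatedly resolve the store file tracker for every store operation and always land on DefaultStoreFileTracker. As a rough illustrative sketch only (not taken from this test), the tracker implementation is normally driven by configuration; the key name below is an assumption to be checked against the HBase release under test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch: reading the (assumed) store file tracker setting.
// The key "hbase.store.file-tracker.impl" is an assumption; verify the exact
// name in the documentation for the HBase version being tested.
public class StoreFileTrackerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Unset in a setup like this one, so the factory would fall back to its default tracker.
    String impl = conf.get("hbase.store.file-tracker.impl", "DEFAULT");
    System.out.println("store file tracker impl = " + impl);
  }
}

If the key is left unset, the factory falls back to its default tracker, which would be consistent with the DefaultStoreFileTracker instantiations seen throughout this log.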
2024-11-12T19:34:25,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,396 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/024893887e8243c59727d136638cff28 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/024893887e8243c59727d136638cff28 2024-11-12T19:34:25,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-12T19:34:25,401 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 758f25fb434410405582dc106004e936/A of 758f25fb434410405582dc106004e936 into 024893887e8243c59727d136638cff28(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:34:25,401 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:25,401 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., storeName=758f25fb434410405582dc106004e936/A, priority=13, startTime=1731440065313; duration=0sec 2024-11-12T19:34:25,401 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:25,401 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 758f25fb434410405582dc106004e936:A 2024-11-12T19:34:25,401 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:25,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,403 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33797 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:25,403 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 758f25fb434410405582dc106004e936/C is initiating minor compaction (all files) 2024-11-12T19:34:25,403 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 758f25fb434410405582dc106004e936/C in TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
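The compaction selection entries above show the policy examining 3 eligible files and choosing all of them. As a simplified, hedged sketch (not the actual ExploringCompactionPolicy code), a ratio-based selection accepts a set of files only when no single file dwarfs the rest; the 1.2 ratio and the file sizes below are illustrative assumptions.

import java.util.List;

// Simplified illustration of a ratio-based selection check, inspired by the
// "Exploring compaction" log lines above. This is NOT the HBase implementation;
// the 1.2 ratio and the all-files shortcut are illustrative assumptions.
public class CompactionSelectionSketch {

  /** Returns true if every file is no larger than ratio * (sum of the other files). */
  static boolean withinRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Hypothetical sizes in the same ballpark as the 11.9 K / 9.4 K / 11.7 K files logged above.
    List<Long> sizes = List.of(12_200L, 9_600L, 11_900L);
    System.out.println("eligible under ratio 1.2: " + withinRatio(sizes, 1.2));
  }
}

With three similarly sized files the check passes, which is consistent with all three files being selected for the minor compaction here.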
2024-11-12T19:34:25,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,403 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/0da3c2a0ea5d477f89d8a860b69d4168, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/b8234d06264d41e7ac445c4d2e1420a5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/1b04518687ac4d06902afb2011197e85] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp, totalSize=33.0 K 2024-11-12T19:34:25,404 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0da3c2a0ea5d477f89d8a860b69d4168, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1731440061970 2024-11-12T19:34:25,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,404 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8234d06264d41e7ac445c4d2e1420a5, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1731440063123 2024-11-12T19:34:25,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,404 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b04518687ac4d06902afb2011197e85, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1731440064403 2024-11-12T19:34:25,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,412 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 758f25fb434410405582dc106004e936#C#compaction#305 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:25,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,413 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/c3b693f4d7044905a2fd190260cb6f84 is 50, key is test_row_0/C:col10/1731440065039/Put/seqid=0 2024-11-12T19:34:25,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,417 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,421 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 758f25fb434410405582dc106004e936 2024-11-12T19:34:25,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,423 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 758f25fb434410405582dc106004e936 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-12T19:34:25,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=A 2024-11-12T19:34:25,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:25,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=B 2024-11-12T19:34:25,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:25,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
758f25fb434410405582dc106004e936, store=C 2024-11-12T19:34:25,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:25,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,429 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,447 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112ec47212587ad4741ba6243058406929f_758f25fb434410405582dc106004e936 is 50, key is test_row_0/A:col10/1731440065421/Put/seqid=0 2024-11-12T19:34:25,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742187_1363 (size=12241) 2024-11-12T19:34:25,455 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/c3b693f4d7044905a2fd190260cb6f84 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/c3b693f4d7044905a2fd190260cb6f84 2024-11-12T19:34:25,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-12T19:34:25,463 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 758f25fb434410405582dc106004e936/C of 758f25fb434410405582dc106004e936 into c3b693f4d7044905a2fd190260cb6f84(size=12.0 K), total size for store is 12.0 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:34:25,463 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:25,463 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., storeName=758f25fb434410405582dc106004e936/C, priority=13, startTime=1731440065313; duration=0sec 2024-11-12T19:34:25,463 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:25,463 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 758f25fb434410405582dc106004e936:C 2024-11-12T19:34:25,531 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:25,535 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-12T19:34:25,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:25,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:25,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:25,535 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
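The flush procedure above is rejected with "Unable to complete flush" because the region is already flushing; the region server reports the failure and the master-side procedure is left to retry later. A minimal, hypothetical sketch of that fail-fast shape (not HBase code) follows.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical illustration of a fail-fast flush request, not HBase code:
// if a flush is already running, report an error instead of queuing another,
// mirroring the "NOT flushing ... as already flushing" entries above.
public class FlushRequestSketch {
  private final AtomicBoolean flushing = new AtomicBoolean(false);

  void requestFlush(String regionName) throws IOException {
    // Fail fast if another flush is in progress rather than starting a second one.
    if (!flushing.compareAndSet(false, true)) {
      throw new IOException("Unable to complete flush for " + regionName
          + ": already flushing");
    }
    try {
      // ... snapshot memstores and write store files (elided) ...
    } finally {
      flushing.set(false);
    }
  }

  public static void main(String[] args) throws IOException {
    FlushRequestSketch sketch = new FlushRequestSketch();
    // First request acquires the flag and succeeds; a concurrent second request would fail fast.
    sketch.requestFlush("758f25fb434410405582dc106004e936");
  }
}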
2024-11-12T19:34:25,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:25,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:25,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742189_1365 (size=24658) 2024-11-12T19:34:25,553 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:25,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:25,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440125553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:25,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:25,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440125555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:25,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:25,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440125556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:25,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:25,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440125558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:25,567 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112ec47212587ad4741ba6243058406929f_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112ec47212587ad4741ba6243058406929f_758f25fb434410405582dc106004e936
2024-11-12T19:34:25,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:25,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440125562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:25,572 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/ff46bc424f16482491a1984636b3f3fe, store: [table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936]
2024-11-12T19:34:25,572 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/ff46bc424f16482491a1984636b3f3fe is 175, key is test_row_0/A:col10/1731440065421/Put/seqid=0
2024-11-12T19:34:25,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742188_1364 (size=74295)
2024-11-12T19:34:25,576 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=134, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/ff46bc424f16482491a1984636b3f3fe
2024-11-12T19:34:25,590 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/0281444efbb342c58a1c70e5120b4b76 is 50, key is test_row_0/B:col10/1731440065421/Put/seqid=0
2024-11-12T19:34:25,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742190_1366 (size=12151)
2024-11-12T19:34:25,623 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/0281444efbb342c58a1c70e5120b4b76
2024-11-12T19:34:25,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/e0c065022f6e4bfcb93b3c3708a71004 is 50, key is test_row_0/C:col10/1731440065421/Put/seqid=0
2024-11-12T19:34:25,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742191_1367 (size=12151)
2024-11-12T19:34:25,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102
2024-11-12T19:34:25,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:25,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440125665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:25,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:25,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440125665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:25,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:25,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440125666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:25,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:25,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440125673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:25,690 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:34:25,692 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103
2024-11-12T19:34:25,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:25,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing
2024-11-12T19:34:25,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:25,692 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103
java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:25,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103
java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:25,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=103
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:25,745 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees
2024-11-12T19:34:25,745 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer
2024-11-12T19:34:25,775 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/47c8597c43c3481abb2d058b9f71272c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/47c8597c43c3481abb2d058b9f71272c
2024-11-12T19:34:25,787 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 758f25fb434410405582dc106004e936/B of 758f25fb434410405582dc106004e936 into 47c8597c43c3481abb2d058b9f71272c(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-12T19:34:25,787 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 758f25fb434410405582dc106004e936:
2024-11-12T19:34:25,787 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., storeName=758f25fb434410405582dc106004e936/B, priority=13, startTime=1731440065313; duration=0sec
2024-11-12T19:34:25,787 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-12T19:34:25,787 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 758f25fb434410405582dc106004e936:B
2024-11-12T19:34:25,845 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:34:25,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103
2024-11-12T19:34:25,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:25,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing
2024-11-12T19:34:25,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:25,848 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103
java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:25,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103
java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
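[Editorial note, not part of the captured log] The pattern above is the master's flush procedure (pid=103) being re-dispatched while a memstore-pressure flush is still in progress: each attempt logs "NOT flushing ... as already flushing", fails with "Unable to complete flush", and is retried until the attempt at 19:34:26,163 below actually starts flushing. The sketch that follows is illustrative only; it shows how such a table flush is requested through the public client API, assuming a reachable cluster whose configuration is on the classpath. Only the table name is taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch: ask the master to flush the table seen in this log.
// The master turns the request into a flush procedure; if the region is
// already flushing, the region server rejects the callable (as logged above)
// and the master retries the remote procedure.
public class FlushRequestSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();              // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));      // table name taken from the log above
        }
    }
}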
2024-11-12T19:34:25,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=103
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:25,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:25,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440125873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:25,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:25,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440125873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:25,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:25,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440125874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:25,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:25,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440125874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:25,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102
2024-11-12T19:34:26,000 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:34:26,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103
2024-11-12T19:34:26,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:26,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing
2024-11-12T19:34:26,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:26,001 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103
java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
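[Editorial note, not part of the captured log] The RegionTooBusyException entries here (callIds 61 through 79) come from HRegion.checkResources rejecting writes while the region's memstore is above its blocking limit of 512.0 K; writers are expected to back off and retry once the in-flight flush drains the memstore. The sketch below is a hypothetical client-side illustration of that behaviour, not code from the test: the table, row, and family names are taken from the log, the retry count and sleep times are arbitrary, and in practice the standard HBase client already performs similar retries internally (governed by settings such as hbase.client.retries.number and hbase.client.pause).

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch: retry a put with simple backoff when the region
// reports "Over memstore limit" (RegionTooBusyException), mirroring what the
// blocked writers in this log eventually do.
public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break;                                        // write accepted
                } catch (IOException e) {
                    // The busy signal may surface directly or wrapped after the
                    // client's own retries; treat both cases the same way here.
                    boolean busy = false;
                    for (Throwable t = e; t != null; t = t.getCause()) {
                        if (t instanceof RegionTooBusyException) { busy = true; break; }
                    }
                    if (!busy || attempt >= 10) throw e;          // not a busy region, or give up
                    Thread.sleep(100L * attempt);                 // back off while the flush drains the memstore
                }
            }
        }
    }
}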
2024-11-12T19:34:26,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103
java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:26,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=103
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:26,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/e0c065022f6e4bfcb93b3c3708a71004
2024-11-12T19:34:26,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/ff46bc424f16482491a1984636b3f3fe as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ff46bc424f16482491a1984636b3f3fe
2024-11-12T19:34:26,072 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ff46bc424f16482491a1984636b3f3fe, entries=400, sequenceid=134, filesize=72.6 K
2024-11-12T19:34:26,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/0281444efbb342c58a1c70e5120b4b76 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/0281444efbb342c58a1c70e5120b4b76
2024-11-12T19:34:26,077 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/0281444efbb342c58a1c70e5120b4b76, entries=150, sequenceid=134, filesize=11.9 K
2024-11-12T19:34:26,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/e0c065022f6e4bfcb93b3c3708a71004 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/e0c065022f6e4bfcb93b3c3708a71004
2024-11-12T19:34:26,081 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/e0c065022f6e4bfcb93b3c3708a71004, entries=150, sequenceid=134, filesize=11.9 K
2024-11-12T19:34:26,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 758f25fb434410405582dc106004e936 in 660ms, sequenceid=134, compaction requested=false
2024-11-12T19:34:26,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 758f25fb434410405582dc106004e936:
2024-11-12T19:34:26,159 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:34:26,163 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103
2024-11-12T19:34:26,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:26,163 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing 758f25fb434410405582dc106004e936 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-11-12T19:34:26,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=A
2024-11-12T19:34:26,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:34:26,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=B
2024-11-12T19:34:26,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:34:26,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=C
2024-11-12T19:34:26,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:34:26,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing
2024-11-12T19:34:26,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 758f25fb434410405582dc106004e936
2024-11-12T19:34:26,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:26,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440126195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:26,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:26,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440126197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:26,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:26,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440126198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:26,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:26,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440126201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:26,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112e34f63b521504362bbdeabe3ce1f68c6_758f25fb434410405582dc106004e936 is 50, key is test_row_0/A:col10/1731440065550/Put/seqid=0
2024-11-12T19:34:26,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742192_1368 (size=12304)
2024-11-12T19:34:26,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:26,265 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112e34f63b521504362bbdeabe3ce1f68c6_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112e34f63b521504362bbdeabe3ce1f68c6_758f25fb434410405582dc106004e936
2024-11-12T19:34:26,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/63a8c9a44d4848bbb38c7499805edb12, store: [table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936]
2024-11-12T19:34:26,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/63a8c9a44d4848bbb38c7499805edb12 is 175, key is test_row_0/A:col10/1731440065550/Put/seqid=0
2024-11-12T19:34:26,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742193_1369 (size=31105)
2024-11-12T19:34:26,305 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=161, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/63a8c9a44d4848bbb38c7499805edb12 2024-11-12T19:34:26,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:26,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440126311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:26,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/6957a8b0bf5448fb97616c17369ce48d is 50, key is test_row_0/B:col10/1731440065550/Put/seqid=0 2024-11-12T19:34:26,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:26,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440126311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:26,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:26,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440126315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:26,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:26,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440126316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:26,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742194_1370 (size=12151) 2024-11-12T19:34:26,359 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/6957a8b0bf5448fb97616c17369ce48d 2024-11-12T19:34:26,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/f03c0423a8cd4fe89404fa68261748bd is 50, key is test_row_0/C:col10/1731440065550/Put/seqid=0 2024-11-12T19:34:26,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742195_1371 (size=12151) 2024-11-12T19:34:26,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-12T19:34:26,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:26,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440126521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:26,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:26,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440126528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:26,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:26,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:26,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440126529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:26,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440126529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:26,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:26,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440126576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:26,787 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/f03c0423a8cd4fe89404fa68261748bd 2024-11-12T19:34:26,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/63a8c9a44d4848bbb38c7499805edb12 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/63a8c9a44d4848bbb38c7499805edb12 2024-11-12T19:34:26,795 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/63a8c9a44d4848bbb38c7499805edb12, entries=150, sequenceid=161, filesize=30.4 K 2024-11-12T19:34:26,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/6957a8b0bf5448fb97616c17369ce48d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/6957a8b0bf5448fb97616c17369ce48d 2024-11-12T19:34:26,801 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/6957a8b0bf5448fb97616c17369ce48d, entries=150, sequenceid=161, filesize=11.9 K 2024-11-12T19:34:26,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/f03c0423a8cd4fe89404fa68261748bd as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/f03c0423a8cd4fe89404fa68261748bd 2024-11-12T19:34:26,806 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/f03c0423a8cd4fe89404fa68261748bd, entries=150, sequenceid=161, filesize=11.9 K 2024-11-12T19:34:26,815 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 758f25fb434410405582dc106004e936 in 644ms, sequenceid=161, compaction requested=true 2024-11-12T19:34:26,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:26,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:26,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-12T19:34:26,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-11-12T19:34:26,821 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-12T19:34:26,821 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4610 sec 2024-11-12T19:34:26,823 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 1.4700 sec 2024-11-12T19:34:26,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 758f25fb434410405582dc106004e936 2024-11-12T19:34:26,833 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 758f25fb434410405582dc106004e936 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-12T19:34:26,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=A 2024-11-12T19:34:26,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:26,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=B 2024-11-12T19:34:26,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:26,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
758f25fb434410405582dc106004e936, store=C 2024-11-12T19:34:26,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:26,853 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411127bdc7a80abf04aab9aad086b80b3f9ef_758f25fb434410405582dc106004e936 is 50, key is test_row_0/A:col10/1731440066831/Put/seqid=0 2024-11-12T19:34:26,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742196_1372 (size=14794) 2024-11-12T19:34:26,867 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:26,880 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411127bdc7a80abf04aab9aad086b80b3f9ef_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411127bdc7a80abf04aab9aad086b80b3f9ef_758f25fb434410405582dc106004e936 2024-11-12T19:34:26,884 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/9676030831264813b9cbecf025682980, store: [table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:26,885 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/9676030831264813b9cbecf025682980 is 175, key is test_row_0/A:col10/1731440066831/Put/seqid=0 2024-11-12T19:34:26,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742197_1373 (size=39749) 2024-11-12T19:34:26,917 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:26,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440126904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:26,917 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:26,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440126905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:26,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:26,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440126910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:26,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:26,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440126910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:27,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440127019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:27,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440127022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:27,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440127023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:27,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440127023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:27,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440127230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:27,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440127234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:27,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440127237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:27,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440127244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,312 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=173, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/9676030831264813b9cbecf025682980 2024-11-12T19:34:27,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/f735ec199df54240931ea7baedc7c0e5 is 50, key is test_row_0/B:col10/1731440066831/Put/seqid=0 2024-11-12T19:34:27,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742198_1374 (size=12151) 2024-11-12T19:34:27,358 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/f735ec199df54240931ea7baedc7c0e5 2024-11-12T19:34:27,382 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/50f1ad82d1544b23866fcb65a3abd186 is 50, key is test_row_0/C:col10/1731440066831/Put/seqid=0 2024-11-12T19:34:27,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742199_1375 (size=12151) 2024-11-12T19:34:27,432 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/50f1ad82d1544b23866fcb65a3abd186 2024-11-12T19:34:27,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/9676030831264813b9cbecf025682980 as 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/9676030831264813b9cbecf025682980 2024-11-12T19:34:27,447 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/9676030831264813b9cbecf025682980, entries=200, sequenceid=173, filesize=38.8 K 2024-11-12T19:34:27,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/f735ec199df54240931ea7baedc7c0e5 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/f735ec199df54240931ea7baedc7c0e5 2024-11-12T19:34:27,455 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/f735ec199df54240931ea7baedc7c0e5, entries=150, sequenceid=173, filesize=11.9 K 2024-11-12T19:34:27,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/50f1ad82d1544b23866fcb65a3abd186 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/50f1ad82d1544b23866fcb65a3abd186 2024-11-12T19:34:27,461 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/50f1ad82d1544b23866fcb65a3abd186, entries=150, sequenceid=173, filesize=11.9 K 2024-11-12T19:34:27,462 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 758f25fb434410405582dc106004e936 in 630ms, sequenceid=173, compaction requested=true 2024-11-12T19:34:27,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:27,462 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:34:27,464 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 176344 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:34:27,464 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 758f25fb434410405582dc106004e936/A is initiating minor compaction (all files) 2024-11-12T19:34:27,464 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 758f25fb434410405582dc106004e936/A in TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:27,464 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/024893887e8243c59727d136638cff28, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ff46bc424f16482491a1984636b3f3fe, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/63a8c9a44d4848bbb38c7499805edb12, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/9676030831264813b9cbecf025682980] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp, totalSize=172.2 K 2024-11-12T19:34:27,464 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:27,464 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/024893887e8243c59727d136638cff28, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ff46bc424f16482491a1984636b3f3fe, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/63a8c9a44d4848bbb38c7499805edb12, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/9676030831264813b9cbecf025682980] 2024-11-12T19:34:27,464 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 024893887e8243c59727d136638cff28, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1731440064403 2024-11-12T19:34:27,465 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff46bc424f16482491a1984636b3f3fe, keycount=400, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1731440065050 2024-11-12T19:34:27,465 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63a8c9a44d4848bbb38c7499805edb12, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1731440065547 2024-11-12T19:34:27,465 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9676030831264813b9cbecf025682980, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1731440066185 2024-11-12T19:34:27,474 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-12T19:34:27,475 INFO [Thread-1518 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-11-12T19:34:27,476 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:34:27,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 758f25fb434410405582dc106004e936:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:34:27,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:27,477 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:34:27,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-11-12T19:34:27,478 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:34:27,478 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48694 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:34:27,479 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 758f25fb434410405582dc106004e936/B is initiating minor compaction (all files) 2024-11-12T19:34:27,479 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 758f25fb434410405582dc106004e936/B in TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
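Aside on the flush requests above ("Client=jenkins//172.17.0.3 flush TestAcidGuarantees", which the master turns into a FlushTableProcedure such as pid=104 with per-region flush subprocedures): a minimal client-side sketch of issuing the same request with the stock hbase-client Admin API, assuming a reachable cluster whose configuration is on the classpath.

// Sketch only: issue a table flush like the one the test client sends.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // The master schedules a flush procedure for every region of the table;
      // the call returns once the operation completes (cf. "procId: 102 completed" above).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}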
2024-11-12T19:34:27,479 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:27,479 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/47c8597c43c3481abb2d058b9f71272c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/0281444efbb342c58a1c70e5120b4b76, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/6957a8b0bf5448fb97616c17369ce48d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/f735ec199df54240931ea7baedc7c0e5] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp, totalSize=47.6 K 2024-11-12T19:34:27,479 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:34:27,479 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:34:27,479 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 47c8597c43c3481abb2d058b9f71272c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1731440064403 2024-11-12T19:34:27,480 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 0281444efbb342c58a1c70e5120b4b76, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1731440065404 2024-11-12T19:34:27,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-12T19:34:27,481 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 6957a8b0bf5448fb97616c17369ce48d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1731440065547 2024-11-12T19:34:27,481 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting f735ec199df54240931ea7baedc7c0e5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1731440066185 2024-11-12T19:34:27,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 758f25fb434410405582dc106004e936:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:34:27,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:27,488 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 758f25fb434410405582dc106004e936:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:34:27,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:27,493 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241112b867c3a8345f4d2592e470c8b13b2420_758f25fb434410405582dc106004e936 store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:27,496 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241112b867c3a8345f4d2592e470c8b13b2420_758f25fb434410405582dc106004e936, store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:27,496 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112b867c3a8345f4d2592e470c8b13b2420_758f25fb434410405582dc106004e936 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:27,508 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 758f25fb434410405582dc106004e936#B#compaction#316 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:27,509 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/e028b01e5e464b3c81de0136b011940d is 50, key is test_row_0/B:col10/1731440066831/Put/seqid=0 2024-11-12T19:34:27,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742200_1376 (size=4469) 2024-11-12T19:34:27,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 758f25fb434410405582dc106004e936 2024-11-12T19:34:27,559 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 758f25fb434410405582dc106004e936 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-12T19:34:27,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=A 2024-11-12T19:34:27,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:27,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=B 2024-11-12T19:34:27,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:27,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=C 2024-11-12T19:34:27,559 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:27,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742201_1377 (size=12527) 2024-11-12T19:34:27,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-12T19:34:27,590 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112a96ff591a26a4ba2a3c69dfd26c9383a_758f25fb434410405582dc106004e936 is 50, key is test_row_0/A:col10/1731440066905/Put/seqid=0 2024-11-12T19:34:27,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:27,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742202_1378 (size=14794) 2024-11-12T19:34:27,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440127603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,622 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:27,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440127604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:27,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440127606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:27,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440127606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,635 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:27,638 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-12T19:34:27,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:27,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:27,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:27,639 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:27,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:27,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:27,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:27,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440127723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:27,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440127725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:27,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440127725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:27,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440127731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-12T19:34:27,796 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:27,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-12T19:34:27,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:27,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:27,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:27,797 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
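Aside on the repeated RegionTooBusyException warnings for Mutate calls above: the stock HBase client already retries these internally, so the hypothetical helper below is purely illustrative of an explicit application-level backoff while the flusher catches up. The row, family, and qualifier mirror the test ("test_row_0", family "A", "col10"); whether the exception surfaces directly or wrapped by the client's retry machinery depends on client settings, which is why the catch clause inspects both the exception and its cause.

// Illustrative backoff around a single put; not how the TestAcidGuarantees client works.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put, 5);
    }
  }

  // Retry only when the region reports it is over its memstore limit,
  // sleeping a little longer on each attempt so the flush can complete.
  static void putWithBackoff(Table table, Put put, int maxAttempts) throws Exception {
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (IOException e) {
        boolean tooBusy = e instanceof RegionTooBusyException
            || e.getCause() instanceof RegionTooBusyException;
        if (!tooBusy || attempt >= maxAttempts) {
          throw e;
        }
        Thread.sleep(100L * attempt); // simple linear backoff
      }
    }
  }
}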
2024-11-12T19:34:27,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:27,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:27,938 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 758f25fb434410405582dc106004e936#A#compaction#315 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:27,938 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/0a4c42402e804d99bd0d20ee99033604 is 175, key is test_row_0/A:col10/1731440066831/Put/seqid=0 2024-11-12T19:34:27,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:27,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440127929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:27,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440127933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:27,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440127936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:27,949 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:27,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-12T19:34:27,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:27,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:27,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:27,952 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:27,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:27,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:27,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:27,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440127945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:27,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742203_1379 (size=31481)
2024-11-12T19:34:27,976 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/0a4c42402e804d99bd0d20ee99033604 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/0a4c42402e804d99bd0d20ee99033604
2024-11-12T19:34:27,985 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/e028b01e5e464b3c81de0136b011940d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/e028b01e5e464b3c81de0136b011940d
2024-11-12T19:34:27,986 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 758f25fb434410405582dc106004e936/A of 758f25fb434410405582dc106004e936 into 0a4c42402e804d99bd0d20ee99033604(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-12T19:34:27,986 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 758f25fb434410405582dc106004e936:
2024-11-12T19:34:27,986 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., storeName=758f25fb434410405582dc106004e936/A, priority=12, startTime=1731440067462; duration=0sec
2024-11-12T19:34:27,986 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-12T19:34:27,986 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 758f25fb434410405582dc106004e936:A
2024-11-12T19:34:27,986 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-11-12T19:34:27,992 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48694 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-11-12T19:34:27,992 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 758f25fb434410405582dc106004e936/C is initiating minor compaction (all files)
2024-11-12T19:34:27,992 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 758f25fb434410405582dc106004e936/C in TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:27,993 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/c3b693f4d7044905a2fd190260cb6f84, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/e0c065022f6e4bfcb93b3c3708a71004, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/f03c0423a8cd4fe89404fa68261748bd, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/50f1ad82d1544b23866fcb65a3abd186] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp, totalSize=47.6 K 2024-11-12T19:34:27,993 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3b693f4d7044905a2fd190260cb6f84, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1731440064403 2024-11-12T19:34:27,999 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0c065022f6e4bfcb93b3c3708a71004, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1731440065404 2024-11-12T19:34:28,000 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting f03c0423a8cd4fe89404fa68261748bd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1731440065547 2024-11-12T19:34:28,001 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50f1ad82d1544b23866fcb65a3abd186, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1731440066185 2024-11-12T19:34:28,001 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 758f25fb434410405582dc106004e936/B of 758f25fb434410405582dc106004e936 into e028b01e5e464b3c81de0136b011940d(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:34:28,002 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 758f25fb434410405582dc106004e936:
2024-11-12T19:34:28,002 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., storeName=758f25fb434410405582dc106004e936/B, priority=12, startTime=1731440067477; duration=0sec
2024-11-12T19:34:28,002 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-12T19:34:28,002 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 758f25fb434410405582dc106004e936:B
2024-11-12T19:34:28,020 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 758f25fb434410405582dc106004e936#C#compaction#318 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-12T19:34:28,021 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/3df8f9a66f2443d6a07074b8a6291df3 is 50, key is test_row_0/C:col10/1731440066831/Put/seqid=0
2024-11-12T19:34:28,023 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:28,027 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112a96ff591a26a4ba2a3c69dfd26c9383a_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112a96ff591a26a4ba2a3c69dfd26c9383a_758f25fb434410405582dc106004e936
2024-11-12T19:34:28,028 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/a6ab8f0a6a7641469ac993c68c539235, store: [table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936]
2024-11-12T19:34:28,029 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/a6ab8f0a6a7641469ac993c68c539235 is 175, key is test_row_0/A:col10/1731440066905/Put/seqid=0
2024-11-12T19:34:28,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742204_1380 (size=12527)
2024-11-12T19:34:28,034 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/3df8f9a66f2443d6a07074b8a6291df3 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/3df8f9a66f2443d6a07074b8a6291df3
2024-11-12T19:34:28,038 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 758f25fb434410405582dc106004e936/C of 758f25fb434410405582dc106004e936 into 3df8f9a66f2443d6a07074b8a6291df3(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-12T19:34:28,038 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 758f25fb434410405582dc106004e936:
2024-11-12T19:34:28,038 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., storeName=758f25fb434410405582dc106004e936/C, priority=12, startTime=1731440067488; duration=0sec
2024-11-12T19:34:28,038 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-12T19:34:28,038 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 758f25fb434410405582dc106004e936:C
2024-11-12T19:34:28,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742205_1381 (size=39749)
2024-11-12T19:34:28,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104
2024-11-12T19:34:28,108 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:34:28,110 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105
2024-11-12T19:34:28,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:28,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing
2024-11-12T19:34:28,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:28,111 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:28,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:28,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:28,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:28,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440128246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:28,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:28,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440128247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:28,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:28,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440128251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:28,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:28,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440128263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:28,266 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:28,267 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-12T19:34:28,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:28,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:28,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:28,271 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:28,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:28,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:28,429 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:28,430 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-12T19:34:28,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:28,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:28,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:28,431 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:28,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:28,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:28,441 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=199, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/a6ab8f0a6a7641469ac993c68c539235
2024-11-12T19:34:28,471 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/86e65b910e314960b6617290c5508b64 is 50, key is test_row_0/B:col10/1731440066905/Put/seqid=0
2024-11-12T19:34:28,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742206_1382 (size=12151)
2024-11-12T19:34:28,489 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/86e65b910e314960b6617290c5508b64
2024-11-12T19:34:28,508 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/3abd8b248f224d0c9df5ae55e9104b68 is 50, key is test_row_0/C:col10/1731440066905/Put/seqid=0
2024-11-12T19:34:28,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742207_1383 (size=12151)
2024-11-12T19:34:28,521 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/3abd8b248f224d0c9df5ae55e9104b68
2024-11-12T19:34:28,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/a6ab8f0a6a7641469ac993c68c539235 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/a6ab8f0a6a7641469ac993c68c539235
2024-11-12T19:34:28,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/a6ab8f0a6a7641469ac993c68c539235, entries=200, sequenceid=199, filesize=38.8 K
2024-11-12T19:34:28,538 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/86e65b910e314960b6617290c5508b64 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/86e65b910e314960b6617290c5508b64
2024-11-12T19:34:28,542 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/86e65b910e314960b6617290c5508b64, entries=150, sequenceid=199, filesize=11.9 K
2024-11-12T19:34:28,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/3abd8b248f224d0c9df5ae55e9104b68 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/3abd8b248f224d0c9df5ae55e9104b68
2024-11-12T19:34:28,547 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/3abd8b248f224d0c9df5ae55e9104b68, entries=150, sequenceid=199, filesize=11.9 K
2024-11-12T19:34:28,547 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 758f25fb434410405582dc106004e936 in 988ms, sequenceid=199, compaction requested=false
2024-11-12T19:34:28,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 758f25fb434410405582dc106004e936:
2024-11-12T19:34:28,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104
2024-11-12T19:34:28,589 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:34:28,590 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105
2024-11-12T19:34:28,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:28,590 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing 758f25fb434410405582dc106004e936 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-12T19:34:28,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=A
2024-11-12T19:34:28,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:34:28,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=B
2024-11-12T19:34:28,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:34:28,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=C
2024-11-12T19:34:28,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:34:28,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411125f7c6a1e3e1340fb931f2ffdacc0bc28_758f25fb434410405582dc106004e936 is 50, key is test_row_0/A:col10/1731440067603/Put/seqid=0
2024-11-12T19:34:28,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 758f25fb434410405582dc106004e936
2024-11-12T19:34:28,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing
2024-11-12T19:34:28,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742208_1384 (size=12304)
2024-11-12T19:34:28,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:28,646 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411125f7c6a1e3e1340fb931f2ffdacc0bc28_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411125f7c6a1e3e1340fb931f2ffdacc0bc28_758f25fb434410405582dc106004e936
2024-11-12T19:34:28,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/ebae37f42d2843f985a2378f353ed16c, store: [table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936]
2024-11-12T19:34:28,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/ebae37f42d2843f985a2378f353ed16c is 175, key is test_row_0/A:col10/1731440067603/Put/seqid=0
2024-11-12T19:34:28,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742209_1385 (size=31105)
2024-11-12T19:34:28,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:28,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440128765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:28,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:28,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:28,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440128768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:28,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440128768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:28,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:28,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440128768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:28,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:28,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440128774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:28,882 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:28,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440128875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:28,883 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:28,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440128877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:28,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:28,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440128881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:29,071 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=213, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/ebae37f42d2843f985a2378f353ed16c 2024-11-12T19:34:29,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/21ac70411b554fcf82c5a50af16f1263 is 50, key is test_row_0/B:col10/1731440067603/Put/seqid=0 2024-11-12T19:34:29,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742210_1386 (size=12151) 2024-11-12T19:34:29,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:29,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440129084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:29,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:29,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440129084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:29,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:29,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440129089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:29,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:29,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440129388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:29,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:29,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440129390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:29,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:29,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440129398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:29,487 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/21ac70411b554fcf82c5a50af16f1263 2024-11-12T19:34:29,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/470c90be92494757b30097eae7fee8c3 is 50, key is test_row_0/C:col10/1731440067603/Put/seqid=0 2024-11-12T19:34:29,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742211_1387 (size=12151) 2024-11-12T19:34:29,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-12T19:34:29,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:29,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440129785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:29,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:29,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440129787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:29,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:29,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440129897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:29,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:29,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440129903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:29,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:29,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440129907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:29,912 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/470c90be92494757b30097eae7fee8c3 2024-11-12T19:34:29,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/ebae37f42d2843f985a2378f353ed16c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ebae37f42d2843f985a2378f353ed16c 2024-11-12T19:34:29,924 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ebae37f42d2843f985a2378f353ed16c, entries=150, sequenceid=213, filesize=30.4 K 2024-11-12T19:34:29,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/21ac70411b554fcf82c5a50af16f1263 as 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/21ac70411b554fcf82c5a50af16f1263 2024-11-12T19:34:29,934 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/21ac70411b554fcf82c5a50af16f1263, entries=150, sequenceid=213, filesize=11.9 K 2024-11-12T19:34:29,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/470c90be92494757b30097eae7fee8c3 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/470c90be92494757b30097eae7fee8c3 2024-11-12T19:34:29,939 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/470c90be92494757b30097eae7fee8c3, entries=150, sequenceid=213, filesize=11.9 K 2024-11-12T19:34:29,941 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 758f25fb434410405582dc106004e936 in 1350ms, sequenceid=213, compaction requested=true 2024-11-12T19:34:29,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:29,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:29,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-11-12T19:34:29,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-11-12T19:34:29,955 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-11-12T19:34:29,955 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4660 sec 2024-11-12T19:34:29,956 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 2.4800 sec 2024-11-12T19:34:30,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 758f25fb434410405582dc106004e936 2024-11-12T19:34:30,910 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 758f25fb434410405582dc106004e936 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-12T19:34:30,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=A 2024-11-12T19:34:30,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:30,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=B 2024-11-12T19:34:30,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:30,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=C 2024-11-12T19:34:30,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:30,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112e0a6eb72f035473f800918c81d9a7ec6_758f25fb434410405582dc106004e936 is 50, key is test_row_0/A:col10/1731440070908/Put/seqid=0 2024-11-12T19:34:30,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742212_1388 (size=14794) 2024-11-12T19:34:30,932 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:30,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440130925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:30,932 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:30,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440130929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:30,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:30,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440130930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:31,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:31,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:31,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440131033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:31,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440131033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:31,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:31,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440131041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:31,245 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:31,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440131240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:31,245 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:31,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440131241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:31,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:31,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440131248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:31,325 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:31,329 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112e0a6eb72f035473f800918c81d9a7ec6_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112e0a6eb72f035473f800918c81d9a7ec6_758f25fb434410405582dc106004e936 2024-11-12T19:34:31,330 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/cf2b2672b7b245c29898a5298bdc685a, store: [table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:31,330 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/cf2b2672b7b245c29898a5298bdc685a is 175, key is test_row_0/A:col10/1731440070908/Put/seqid=0 2024-11-12T19:34:31,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742213_1389 (size=39749) 2024-11-12T19:34:31,341 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=239, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/cf2b2672b7b245c29898a5298bdc685a 2024-11-12T19:34:31,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/a1bd5ac268d748f3b2a849362b1a29e6 is 50, key is test_row_0/B:col10/1731440070908/Put/seqid=0 2024-11-12T19:34:31,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742214_1390 
(size=12151) 2024-11-12T19:34:31,387 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/a1bd5ac268d748f3b2a849362b1a29e6 2024-11-12T19:34:31,397 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/301be698710f4cefaf017f414880807c is 50, key is test_row_0/C:col10/1731440070908/Put/seqid=0 2024-11-12T19:34:31,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742215_1391 (size=12151) 2024-11-12T19:34:31,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:31,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440131548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:31,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:31,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440131548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:31,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:31,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440131558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:31,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-12T19:34:31,588 INFO [Thread-1518 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-11-12T19:34:31,596 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:34:31,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-12T19:34:31,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-12T19:34:31,598 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:34:31,599 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:34:31,599 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:34:31,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-12T19:34:31,752 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:31,752 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-12T19:34:31,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:31,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:31,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:31,753 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:31,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:31,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:31,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:31,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440131790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:31,796 DEBUG [Thread-1512 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4191 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., hostname=81d69e608036,33067,1731439956493, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:34:31,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:31,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440131799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:31,807 DEBUG [Thread-1508 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4204 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., hostname=81d69e608036,33067,1731439956493, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:34:31,835 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=239 (bloomFilter=true), 
to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/301be698710f4cefaf017f414880807c 2024-11-12T19:34:31,854 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/cf2b2672b7b245c29898a5298bdc685a as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/cf2b2672b7b245c29898a5298bdc685a 2024-11-12T19:34:31,868 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/cf2b2672b7b245c29898a5298bdc685a, entries=200, sequenceid=239, filesize=38.8 K 2024-11-12T19:34:31,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/a1bd5ac268d748f3b2a849362b1a29e6 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/a1bd5ac268d748f3b2a849362b1a29e6 2024-11-12T19:34:31,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-12T19:34:31,904 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/a1bd5ac268d748f3b2a849362b1a29e6, entries=150, sequenceid=239, filesize=11.9 K 2024-11-12T19:34:31,907 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:31,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-12T19:34:31,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:31,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:31,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:31,908 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:31,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:31,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:31,911 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/301be698710f4cefaf017f414880807c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/301be698710f4cefaf017f414880807c 2024-11-12T19:34:31,933 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/301be698710f4cefaf017f414880807c, entries=150, sequenceid=239, filesize=11.9 K 2024-11-12T19:34:31,934 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 758f25fb434410405582dc106004e936 in 1025ms, sequenceid=239, compaction requested=true 2024-11-12T19:34:31,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:31,934 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:34:31,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 758f25fb434410405582dc106004e936:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:34:31,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:31,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for 
store 758f25fb434410405582dc106004e936:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:34:31,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:31,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 758f25fb434410405582dc106004e936:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:34:31,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-12T19:34:31,938 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:34:31,940 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142084 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:34:31,941 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 758f25fb434410405582dc106004e936/A is initiating minor compaction (all files) 2024-11-12T19:34:31,941 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 758f25fb434410405582dc106004e936/A in TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:31,941 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/0a4c42402e804d99bd0d20ee99033604, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/a6ab8f0a6a7641469ac993c68c539235, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ebae37f42d2843f985a2378f353ed16c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/cf2b2672b7b245c29898a5298bdc685a] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp, totalSize=138.8 K 2024-11-12T19:34:31,941 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:31,941 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/0a4c42402e804d99bd0d20ee99033604, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/a6ab8f0a6a7641469ac993c68c539235, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ebae37f42d2843f985a2378f353ed16c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/cf2b2672b7b245c29898a5298bdc685a] 2024-11-12T19:34:31,942 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a4c42402e804d99bd0d20ee99033604, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1731440066185 2024-11-12T19:34:31,942 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting a6ab8f0a6a7641469ac993c68c539235, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1731440066899 2024-11-12T19:34:31,942 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting ebae37f42d2843f985a2378f353ed16c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731440067601 2024-11-12T19:34:31,943 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting cf2b2672b7b245c29898a5298bdc685a, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1731440068763 2024-11-12T19:34:31,951 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:34:31,951 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 758f25fb434410405582dc106004e936/B is initiating minor compaction (all files) 2024-11-12T19:34:31,951 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 758f25fb434410405582dc106004e936/B in TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:31,951 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/e028b01e5e464b3c81de0136b011940d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/86e65b910e314960b6617290c5508b64, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/21ac70411b554fcf82c5a50af16f1263, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/a1bd5ac268d748f3b2a849362b1a29e6] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp, totalSize=47.8 K 2024-11-12T19:34:31,955 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting e028b01e5e464b3c81de0136b011940d, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1731440066185 2024-11-12T19:34:31,957 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86e65b910e314960b6617290c5508b64, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1731440066899 2024-11-12T19:34:31,958 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21ac70411b554fcf82c5a50af16f1263, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731440067601 2024-11-12T19:34:31,958 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1bd5ac268d748f3b2a849362b1a29e6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1731440068764 2024-11-12T19:34:32,008 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:32,015 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 758f25fb434410405582dc106004e936#B#compaction#328 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:32,016 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/6c9d020516194f938fa43989425c7fb4 is 50, key is test_row_0/B:col10/1731440070908/Put/seqid=0 2024-11-12T19:34:32,019 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411128322707e8197419c8fd5840b45625550_758f25fb434410405582dc106004e936 store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:32,022 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411128322707e8197419c8fd5840b45625550_758f25fb434410405582dc106004e936, store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:32,022 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411128322707e8197419c8fd5840b45625550_758f25fb434410405582dc106004e936 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:32,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742216_1392 (size=12663) 2024-11-12T19:34:32,065 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:32,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 758f25fb434410405582dc106004e936 2024-11-12T19:34:32,070 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 758f25fb434410405582dc106004e936 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-12T19:34:32,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-12T19:34:32,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:32,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:32,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:32,071 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:32,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:32,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:32,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=A 2024-11-12T19:34:32,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:32,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=B 2024-11-12T19:34:32,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:32,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=C 2024-11-12T19:34:32,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:32,074 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/6c9d020516194f938fa43989425c7fb4 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/6c9d020516194f938fa43989425c7fb4 2024-11-12T19:34:32,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742217_1393 (size=4469) 2024-11-12T19:34:32,083 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 758f25fb434410405582dc106004e936#A#compaction#327 average throughput is 0.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:32,083 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 758f25fb434410405582dc106004e936/B of 758f25fb434410405582dc106004e936 into 6c9d020516194f938fa43989425c7fb4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:34:32,083 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:32,083 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., storeName=758f25fb434410405582dc106004e936/B, priority=12, startTime=1731440071935; duration=0sec 2024-11-12T19:34:32,083 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:32,083 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 758f25fb434410405582dc106004e936:B 2024-11-12T19:34:32,083 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:34:32,083 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/23514fe3ea1f442690e78ac67a404db6 is 175, key is test_row_0/A:col10/1731440070908/Put/seqid=0 2024-11-12T19:34:32,086 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:34:32,086 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 758f25fb434410405582dc106004e936/C is initiating minor compaction (all files) 2024-11-12T19:34:32,086 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 758f25fb434410405582dc106004e936/C in TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:32,087 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/3df8f9a66f2443d6a07074b8a6291df3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/3abd8b248f224d0c9df5ae55e9104b68, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/470c90be92494757b30097eae7fee8c3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/301be698710f4cefaf017f414880807c] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp, totalSize=47.8 K
2024-11-12T19:34:32,088 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3df8f9a66f2443d6a07074b8a6291df3, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1731440066185
2024-11-12T19:34:32,088 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3abd8b248f224d0c9df5ae55e9104b68, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1731440066899
2024-11-12T19:34:32,089 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 470c90be92494757b30097eae7fee8c3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731440067601
2024-11-12T19:34:32,089 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 301be698710f4cefaf017f414880807c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1731440068764
2024-11-12T19:34:32,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742218_1394 (size=31617)
2024-11-12T19:34:32,111 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411120560d26bd8a7416dbe193508501e7511_758f25fb434410405582dc106004e936 is 50, key is test_row_0/A:col10/1731440072056/Put/seqid=0
2024-11-12T19:34:32,113 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 758f25fb434410405582dc106004e936#C#compaction#330 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-12T19:34:32,114 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/24e53b68365e464ca1a2160243587af4 is 50, key is test_row_0/C:col10/1731440070908/Put/seqid=0
2024-11-12T19:34:32,118 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/23514fe3ea1f442690e78ac67a404db6 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/23514fe3ea1f442690e78ac67a404db6
2024-11-12T19:34:32,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742219_1395 (size=12304)
2024-11-12T19:34:32,125 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:34:32,130 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411120560d26bd8a7416dbe193508501e7511_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411120560d26bd8a7416dbe193508501e7511_758f25fb434410405582dc106004e936
2024-11-12T19:34:32,131 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 758f25fb434410405582dc106004e936/A of 758f25fb434410405582dc106004e936 into 23514fe3ea1f442690e78ac67a404db6(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-12T19:34:32,131 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 758f25fb434410405582dc106004e936:
2024-11-12T19:34:32,131 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., storeName=758f25fb434410405582dc106004e936/A, priority=12, startTime=1731440071934; duration=0sec
2024-11-12T19:34:32,131 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-12T19:34:32,131 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 758f25fb434410405582dc106004e936:A
2024-11-12T19:34:32,132 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/9cda07c5f123448791c4d951d630d7ab, store: [table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936]
2024-11-12T19:34:32,134 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/9cda07c5f123448791c4d951d630d7ab is 175, key is test_row_0/A:col10/1731440072056/Put/seqid=0
2024-11-12T19:34:32,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742220_1396 (size=12663)
2024-11-12T19:34:32,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742221_1397 (size=31105)
2024-11-12T19:34:32,188 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/24e53b68365e464ca1a2160243587af4 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/24e53b68365e464ca1a2160243587af4
2024-11-12T19:34:32,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106
2024-11-12T19:34:32,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:32,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440132200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:32,212 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 758f25fb434410405582dc106004e936/C of 758f25fb434410405582dc106004e936 into 24e53b68365e464ca1a2160243587af4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-12T19:34:32,212 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 758f25fb434410405582dc106004e936:
2024-11-12T19:34:32,212 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., storeName=758f25fb434410405582dc106004e936/C, priority=12, startTime=1731440071935; duration=0sec
2024-11-12T19:34:32,212 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-12T19:34:32,212 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 758f25fb434410405582dc106004e936:C
2024-11-12T19:34:32,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:32,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440132200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:32,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:32,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440132209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:32,223 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:34:32,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107
2024-11-12T19:34:32,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:32,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing
2024-11-12T19:34:32,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:32,224 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107
java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:32,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107
java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:32,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=107
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:32,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:32,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440132308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:32,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:32,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440132319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:32,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:32,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440132320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:32,380 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:34:32,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107
2024-11-12T19:34:32,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:32,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing
2024-11-12T19:34:32,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:32,381 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107
java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:32,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107
java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:32,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=107
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:32,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:32,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440132515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:32,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:32,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440132524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:32,533 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:34:32,533 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107
2024-11-12T19:34:32,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:32,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing
2024-11-12T19:34:32,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:32,534 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107
java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:32,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107
java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:32,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=107
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:32,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:32,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440132531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:32,572 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=251, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/9cda07c5f123448791c4d951d630d7ab
2024-11-12T19:34:32,579 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/121d255d03a04cd496d6bdabaaf11e4b is 50, key is test_row_0/B:col10/1731440072056/Put/seqid=0
2024-11-12T19:34:32,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742222_1398 (size=12151)
2024-11-12T19:34:32,687 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:34:32,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107
2024-11-12T19:34:32,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:32,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing
2024-11-12T19:34:32,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:32,687 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107
java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:32,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107
java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:32,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=107
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T19:34:32,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106
2024-11-12T19:34:32,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:32,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440132823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:32,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:32,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440132831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:32,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:34:32,845 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:34:32,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440132839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493
2024-11-12T19:34:32,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107
2024-11-12T19:34:32,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:32,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing
2024-11-12T19:34:32,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.
2024-11-12T19:34:32,846 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107
java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:32,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:32,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:32,998 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:32,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-12T19:34:32,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:32,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:32,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:32,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:32,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:32,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:33,019 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/121d255d03a04cd496d6bdabaaf11e4b 2024-11-12T19:34:33,026 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/c0ba55af17d14d13b6065dd934ee9838 is 50, key is test_row_0/C:col10/1731440072056/Put/seqid=0 2024-11-12T19:34:33,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742223_1399 (size=12151) 2024-11-12T19:34:33,150 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:33,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-12T19:34:33,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:33,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:33,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:33,152 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:33,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:33,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:33,305 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:33,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-12T19:34:33,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:33,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:33,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:33,306 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:33,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:33,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:33,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:33,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440133334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:33,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:33,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440133340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:33,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:33,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440133346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:33,430 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/c0ba55af17d14d13b6065dd934ee9838 2024-11-12T19:34:33,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/9cda07c5f123448791c4d951d630d7ab as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/9cda07c5f123448791c4d951d630d7ab 2024-11-12T19:34:33,436 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/9cda07c5f123448791c4d951d630d7ab, entries=150, sequenceid=251, filesize=30.4 K 2024-11-12T19:34:33,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/121d255d03a04cd496d6bdabaaf11e4b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/121d255d03a04cd496d6bdabaaf11e4b 2024-11-12T19:34:33,440 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/121d255d03a04cd496d6bdabaaf11e4b, entries=150, sequenceid=251, filesize=11.9 K 2024-11-12T19:34:33,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/c0ba55af17d14d13b6065dd934ee9838 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/c0ba55af17d14d13b6065dd934ee9838 2024-11-12T19:34:33,444 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/c0ba55af17d14d13b6065dd934ee9838, entries=150, sequenceid=251, filesize=11.9 K 2024-11-12T19:34:33,444 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 758f25fb434410405582dc106004e936 in 1374ms, sequenceid=251, compaction requested=false 2024-11-12T19:34:33,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:33,457 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:33,458 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-12T19:34:33,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:33,458 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 758f25fb434410405582dc106004e936 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-12T19:34:33,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=A 2024-11-12T19:34:33,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:33,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=B 2024-11-12T19:34:33,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:33,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=C 2024-11-12T19:34:33,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:33,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111283c1a28e3de94d12b4acdb1e5604fb4d_758f25fb434410405582dc106004e936 is 50, key is test_row_0/A:col10/1731440072203/Put/seqid=0 2024-11-12T19:34:33,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742224_1400 (size=12454) 2024-11-12T19:34:33,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-12T19:34:33,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:33,880 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111283c1a28e3de94d12b4acdb1e5604fb4d_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111283c1a28e3de94d12b4acdb1e5604fb4d_758f25fb434410405582dc106004e936 2024-11-12T19:34:33,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/38ccc943be334bd494b4718768842125, store: [table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:33,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/38ccc943be334bd494b4718768842125 is 175, key is test_row_0/A:col10/1731440072203/Put/seqid=0 2024-11-12T19:34:33,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742225_1401 (size=31255) 2024-11-12T19:34:34,004 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-12T19:34:34,299 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=278, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/38ccc943be334bd494b4718768842125 2024-11-12T19:34:34,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/56d7b441bdb54d97898e9266e121a9ad is 50, key is test_row_0/B:col10/1731440072203/Put/seqid=0 2024-11-12T19:34:34,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742226_1402 (size=12301) 2024-11-12T19:34:34,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 758f25fb434410405582dc106004e936 2024-11-12T19:34:34,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:34,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:34,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440134370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:34,382 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:34,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440134374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:34,382 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:34,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440134380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:34,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:34,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:34,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440134482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:34,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440134483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:34,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:34,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440134483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:34,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:34,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440134688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:34,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:34,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440134691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:34,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:34,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440134695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:34,741 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/56d7b441bdb54d97898e9266e121a9ad 2024-11-12T19:34:34,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/0935a0b1c03b45789c14743253f9f7e8 is 50, key is test_row_0/C:col10/1731440072203/Put/seqid=0 2024-11-12T19:34:34,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742227_1403 (size=12301) 2024-11-12T19:34:35,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:35,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440134996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:35,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:35,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440134999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:35,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:35,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440135002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:35,181 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/0935a0b1c03b45789c14743253f9f7e8 2024-11-12T19:34:35,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/38ccc943be334bd494b4718768842125 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/38ccc943be334bd494b4718768842125 2024-11-12T19:34:35,193 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/38ccc943be334bd494b4718768842125, entries=150, sequenceid=278, filesize=30.5 K 2024-11-12T19:34:35,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/56d7b441bdb54d97898e9266e121a9ad as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/56d7b441bdb54d97898e9266e121a9ad 2024-11-12T19:34:35,197 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/56d7b441bdb54d97898e9266e121a9ad, entries=150, sequenceid=278, filesize=12.0 K 2024-11-12T19:34:35,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/0935a0b1c03b45789c14743253f9f7e8 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/0935a0b1c03b45789c14743253f9f7e8 2024-11-12T19:34:35,211 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/0935a0b1c03b45789c14743253f9f7e8, entries=150, sequenceid=278, filesize=12.0 K 2024-11-12T19:34:35,213 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 758f25fb434410405582dc106004e936 in 1754ms, sequenceid=278, compaction requested=true 2024-11-12T19:34:35,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:35,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:35,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-12T19:34:35,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-12T19:34:35,217 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-12T19:34:35,217 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.6160 sec 2024-11-12T19:34:35,218 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 3.6220 sec 2024-11-12T19:34:35,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 758f25fb434410405582dc106004e936 2024-11-12T19:34:35,510 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 758f25fb434410405582dc106004e936 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-12T19:34:35,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=A 2024-11-12T19:34:35,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:35,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=B 2024-11-12T19:34:35,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:35,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=C 2024-11-12T19:34:35,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:35,514 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112a65746bb27fd40ba9cadfddff8d2ce9f_758f25fb434410405582dc106004e936 is 50, key is test_row_0/A:col10/1731440074370/Put/seqid=0 2024-11-12T19:34:35,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742228_1404 (size=14994) 2024-11-12T19:34:35,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:35,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440135560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:35,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:35,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440135562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:35,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:35,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440135562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:35,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:35,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440135674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:35,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:35,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440135675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:35,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:35,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440135675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:35,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-12T19:34:35,711 INFO [Thread-1518 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-12T19:34:35,714 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:34:35,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-12T19:34:35,716 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:34:35,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-12T19:34:35,716 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:34:35,716 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:34:35,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-12T19:34:35,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:35,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36286 deadline: 1731440135814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:35,824 DEBUG [Thread-1512 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8219 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., hostname=81d69e608036,33067,1731439956493, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:34:35,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:35,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36326 deadline: 1731440135835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:35,845 DEBUG [Thread-1508 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8242 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., hostname=81d69e608036,33067,1731439956493, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:34:35,868 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:35,868 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-12T19:34:35,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:35,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:35,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:35,868 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:35,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:35,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:35,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:35,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440135885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:35,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:35,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440135886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:35,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:35,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440135886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:35,918 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:35,921 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112a65746bb27fd40ba9cadfddff8d2ce9f_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112a65746bb27fd40ba9cadfddff8d2ce9f_758f25fb434410405582dc106004e936 2024-11-12T19:34:35,922 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/b52201543e4f432b9beb4bec4d39b87b, store: [table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:35,923 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/b52201543e4f432b9beb4bec4d39b87b is 175, key is test_row_0/A:col10/1731440074370/Put/seqid=0 2024-11-12T19:34:35,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742229_1405 (size=39949) 2024-11-12T19:34:36,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-12T19:34:36,020 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:36,020 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-12T19:34:36,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:36,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:36,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:36,021 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:36,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:36,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:36,172 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:36,172 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-12T19:34:36,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:36,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
as already flushing 2024-11-12T19:34:36,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:36,173 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:36,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:36,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:36,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:36,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440136190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:36,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:36,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440136191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:36,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:36,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440136192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:36,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-12T19:34:36,325 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:36,325 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-12T19:34:36,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:36,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:36,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:36,326 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:36,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:36,326 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=291, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/b52201543e4f432b9beb4bec4d39b87b 2024-11-12T19:34:36,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:36,344 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/e43d1e67d8fc43868bb6376da6d99294 is 50, key is test_row_0/B:col10/1731440074370/Put/seqid=0 2024-11-12T19:34:36,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742230_1406 (size=12301) 2024-11-12T19:34:36,355 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/e43d1e67d8fc43868bb6376da6d99294 2024-11-12T19:34:36,368 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/6ae3c91fb6fd4d19bc82efe30bba8f14 is 50, key is test_row_0/C:col10/1731440074370/Put/seqid=0 2024-11-12T19:34:36,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742231_1407 (size=12301) 2024-11-12T19:34:36,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/6ae3c91fb6fd4d19bc82efe30bba8f14 2024-11-12T19:34:36,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/b52201543e4f432b9beb4bec4d39b87b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/b52201543e4f432b9beb4bec4d39b87b 2024-11-12T19:34:36,391 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/b52201543e4f432b9beb4bec4d39b87b, entries=200, sequenceid=291, filesize=39.0 K 2024-11-12T19:34:36,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/e43d1e67d8fc43868bb6376da6d99294 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/e43d1e67d8fc43868bb6376da6d99294 2024-11-12T19:34:36,400 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/e43d1e67d8fc43868bb6376da6d99294, entries=150, sequenceid=291, filesize=12.0 K 2024-11-12T19:34:36,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/6ae3c91fb6fd4d19bc82efe30bba8f14 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/6ae3c91fb6fd4d19bc82efe30bba8f14 2024-11-12T19:34:36,408 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/6ae3c91fb6fd4d19bc82efe30bba8f14, entries=150, sequenceid=291, filesize=12.0 K 2024-11-12T19:34:36,409 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 758f25fb434410405582dc106004e936 in 900ms, sequenceid=291, compaction requested=true 2024-11-12T19:34:36,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:36,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 758f25fb434410405582dc106004e936:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:34:36,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:36,409 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:34:36,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 758f25fb434410405582dc106004e936:B, priority=-2147483648, current under compaction store 
size is 2 2024-11-12T19:34:36,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:36,409 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:34:36,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 758f25fb434410405582dc106004e936:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:34:36,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:36,412 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133926 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:34:36,412 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49416 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:34:36,412 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 758f25fb434410405582dc106004e936/B is initiating minor compaction (all files) 2024-11-12T19:34:36,412 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 758f25fb434410405582dc106004e936/A is initiating minor compaction (all files) 2024-11-12T19:34:36,412 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 758f25fb434410405582dc106004e936/A in TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:36,412 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 758f25fb434410405582dc106004e936/B in TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:36,412 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/6c9d020516194f938fa43989425c7fb4, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/121d255d03a04cd496d6bdabaaf11e4b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/56d7b441bdb54d97898e9266e121a9ad, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/e43d1e67d8fc43868bb6376da6d99294] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp, totalSize=48.3 K 2024-11-12T19:34:36,412 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/23514fe3ea1f442690e78ac67a404db6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/9cda07c5f123448791c4d951d630d7ab, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/38ccc943be334bd494b4718768842125, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/b52201543e4f432b9beb4bec4d39b87b] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp, totalSize=130.8 K 2024-11-12T19:34:36,412 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:36,412 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/23514fe3ea1f442690e78ac67a404db6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/9cda07c5f123448791c4d951d630d7ab, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/38ccc943be334bd494b4718768842125, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/b52201543e4f432b9beb4bec4d39b87b] 2024-11-12T19:34:36,412 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 23514fe3ea1f442690e78ac67a404db6, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1731440068764 2024-11-12T19:34:36,412 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c9d020516194f938fa43989425c7fb4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1731440068764 2024-11-12T19:34:36,413 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9cda07c5f123448791c4d951d630d7ab, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731440070916 2024-11-12T19:34:36,413 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 121d255d03a04cd496d6bdabaaf11e4b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731440070916 2024-11-12T19:34:36,413 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 56d7b441bdb54d97898e9266e121a9ad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1731440072188 2024-11-12T19:34:36,413 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting e43d1e67d8fc43868bb6376da6d99294, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1731440074370 2024-11-12T19:34:36,415 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38ccc943be334bd494b4718768842125, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1731440072188 2024-11-12T19:34:36,417 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting b52201543e4f432b9beb4bec4d39b87b, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1731440074364 2024-11-12T19:34:36,436 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:36,439 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 758f25fb434410405582dc106004e936#B#compaction#340 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:36,440 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/12bcff7f816544848286219d72bad7cb is 50, key is test_row_0/B:col10/1731440074370/Put/seqid=0 2024-11-12T19:34:36,441 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411120aaaef1291bb43b3a4d2038077f4cf83_758f25fb434410405582dc106004e936 store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:36,444 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411120aaaef1291bb43b3a4d2038077f4cf83_758f25fb434410405582dc106004e936, store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:36,444 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411120aaaef1291bb43b3a4d2038077f4cf83_758f25fb434410405582dc106004e936 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:36,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742233_1409 (size=4469) 2024-11-12T19:34:36,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742232_1408 (size=12949) 2024-11-12T19:34:36,466 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 758f25fb434410405582dc106004e936#A#compaction#339 average throughput is 0.81 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:36,466 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/d64a525a64d2456a9b7bdab248961eb5 is 175, key is test_row_0/A:col10/1731440074370/Put/seqid=0 2024-11-12T19:34:36,469 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/12bcff7f816544848286219d72bad7cb as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/12bcff7f816544848286219d72bad7cb 2024-11-12T19:34:36,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742234_1410 (size=31903) 2024-11-12T19:34:36,475 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 758f25fb434410405582dc106004e936/B of 758f25fb434410405582dc106004e936 into 12bcff7f816544848286219d72bad7cb(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:34:36,475 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:36,475 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., storeName=758f25fb434410405582dc106004e936/B, priority=12, startTime=1731440076409; duration=0sec 2024-11-12T19:34:36,475 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:36,475 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 758f25fb434410405582dc106004e936:B 2024-11-12T19:34:36,475 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:34:36,476 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49416 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:34:36,476 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 758f25fb434410405582dc106004e936/C is initiating minor compaction (all files) 2024-11-12T19:34:36,476 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 758f25fb434410405582dc106004e936/C in TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:36,476 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/24e53b68365e464ca1a2160243587af4, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/c0ba55af17d14d13b6065dd934ee9838, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/0935a0b1c03b45789c14743253f9f7e8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/6ae3c91fb6fd4d19bc82efe30bba8f14] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp, totalSize=48.3 K 2024-11-12T19:34:36,476 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 24e53b68365e464ca1a2160243587af4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1731440068764 2024-11-12T19:34:36,476 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/d64a525a64d2456a9b7bdab248961eb5 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/d64a525a64d2456a9b7bdab248961eb5 2024-11-12T19:34:36,476 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting c0ba55af17d14d13b6065dd934ee9838, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731440070916 2024-11-12T19:34:36,477 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 0935a0b1c03b45789c14743253f9f7e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1731440072188 2024-11-12T19:34:36,477 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ae3c91fb6fd4d19bc82efe30bba8f14, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1731440074370 2024-11-12T19:34:36,478 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:36,478 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-12T19:34:36,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:36,479 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 758f25fb434410405582dc106004e936 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-12T19:34:36,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=A 2024-11-12T19:34:36,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:36,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=B 2024-11-12T19:34:36,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:36,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=C 2024-11-12T19:34:36,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:36,481 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 758f25fb434410405582dc106004e936/A of 758f25fb434410405582dc106004e936 into d64a525a64d2456a9b7bdab248961eb5(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:34:36,481 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:36,481 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., storeName=758f25fb434410405582dc106004e936/A, priority=12, startTime=1731440076409; duration=0sec 2024-11-12T19:34:36,481 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:36,481 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 758f25fb434410405582dc106004e936:A 2024-11-12T19:34:36,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112ea4af62b27024dbb907273a81cf56982_758f25fb434410405582dc106004e936 is 50, key is test_row_0/A:col10/1731440075561/Put/seqid=0 2024-11-12T19:34:36,492 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 758f25fb434410405582dc106004e936#C#compaction#342 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:36,493 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/ba5ab8e6f7a34906b451420a95a592a5 is 50, key is test_row_0/C:col10/1731440074370/Put/seqid=0 2024-11-12T19:34:36,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742235_1411 (size=12454) 2024-11-12T19:34:36,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742236_1412 (size=12949) 2024-11-12T19:34:36,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 758f25fb434410405582dc106004e936 2024-11-12T19:34:36,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. as already flushing 2024-11-12T19:34:36,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:36,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440136709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:36,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:36,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440136709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:36,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:36,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440136710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:36,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:36,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440136813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:36,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-12T19:34:36,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:36,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440136818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:36,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:36,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440136818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:36,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:36,900 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112ea4af62b27024dbb907273a81cf56982_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112ea4af62b27024dbb907273a81cf56982_758f25fb434410405582dc106004e936 2024-11-12T19:34:36,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/a2f5fb497a3042c09761eff32757b9e9, store: [table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:36,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/a2f5fb497a3042c09761eff32757b9e9 is 175, key is test_row_0/A:col10/1731440075561/Put/seqid=0 2024-11-12T19:34:36,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742237_1413 (size=31255) 2024-11-12T19:34:36,915 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/ba5ab8e6f7a34906b451420a95a592a5 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/ba5ab8e6f7a34906b451420a95a592a5 2024-11-12T19:34:36,924 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 4 (all) file(s) in 758f25fb434410405582dc106004e936/C of 758f25fb434410405582dc106004e936 into ba5ab8e6f7a34906b451420a95a592a5(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:34:36,925 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:36,925 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., storeName=758f25fb434410405582dc106004e936/C, priority=12, startTime=1731440076409; duration=0sec 2024-11-12T19:34:36,925 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:36,925 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 758f25fb434410405582dc106004e936:C 2024-11-12T19:34:37,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:37,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440137014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:37,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:37,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440137024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:37,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:37,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440137024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:37,308 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=315, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/a2f5fb497a3042c09761eff32757b9e9 2024-11-12T19:34:37,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:37,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440137318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:37,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/d139f242ab104e7db1e0a9062f211782 is 50, key is test_row_0/B:col10/1731440075561/Put/seqid=0 2024-11-12T19:34:37,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:37,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440137331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:37,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:37,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440137333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:37,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742238_1414 (size=12301) 2024-11-12T19:34:37,358 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/d139f242ab104e7db1e0a9062f211782 2024-11-12T19:34:37,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/da46612ebb2841e2a3d110eeedcedf08 is 50, key is test_row_0/C:col10/1731440075561/Put/seqid=0 2024-11-12T19:34:37,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742239_1415 (size=12301) 2024-11-12T19:34:37,798 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/da46612ebb2841e2a3d110eeedcedf08 2024-11-12T19:34:37,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/a2f5fb497a3042c09761eff32757b9e9 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/a2f5fb497a3042c09761eff32757b9e9 2024-11-12T19:34:37,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-12T19:34:37,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:37,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440137827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:37,835 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/a2f5fb497a3042c09761eff32757b9e9, entries=150, sequenceid=315, filesize=30.5 K 2024-11-12T19:34:37,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/d139f242ab104e7db1e0a9062f211782 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/d139f242ab104e7db1e0a9062f211782 2024-11-12T19:34:37,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:37,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440137839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:37,841 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/d139f242ab104e7db1e0a9062f211782, entries=150, sequenceid=315, filesize=12.0 K 2024-11-12T19:34:37,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/da46612ebb2841e2a3d110eeedcedf08 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/da46612ebb2841e2a3d110eeedcedf08 2024-11-12T19:34:37,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:37,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440137847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:37,856 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/da46612ebb2841e2a3d110eeedcedf08, entries=150, sequenceid=315, filesize=12.0 K 2024-11-12T19:34:37,867 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 758f25fb434410405582dc106004e936 in 1387ms, sequenceid=315, compaction requested=false 2024-11-12T19:34:37,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:37,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:37,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-12T19:34:37,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-12T19:34:37,870 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-12T19:34:37,870 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1520 sec 2024-11-12T19:34:37,873 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 2.1560 sec 2024-11-12T19:34:38,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 758f25fb434410405582dc106004e936 2024-11-12T19:34:38,847 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 758f25fb434410405582dc106004e936 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-12T19:34:38,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=A 2024-11-12T19:34:38,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:38,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=B 2024-11-12T19:34:38,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:38,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=C 2024-11-12T19:34:38,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:38,853 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111280d376c91bd5464a9c6129e0919a3a2e_758f25fb434410405582dc106004e936 is 50, key is test_row_0/A:col10/1731440078846/Put/seqid=0 2024-11-12T19:34:38,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742240_1416 (size=14994) 2024-11-12T19:34:38,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:38,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440138915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:38,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:38,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440138923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:38,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:38,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440138923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:39,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:39,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440139023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:39,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:39,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440139034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:39,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:39,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440139034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:39,135 DEBUG [Thread-1527 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06179765 to 127.0.0.1:60358 2024-11-12T19:34:39,135 DEBUG [Thread-1527 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:39,136 DEBUG [Thread-1523 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c35c7c4 to 127.0.0.1:60358 2024-11-12T19:34:39,136 DEBUG [Thread-1523 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:39,137 DEBUG [Thread-1521 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62cf69c5 to 127.0.0.1:60358 2024-11-12T19:34:39,137 DEBUG [Thread-1521 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:39,139 DEBUG [Thread-1519 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0c80a40c to 127.0.0.1:60358 2024-11-12T19:34:39,139 DEBUG [Thread-1519 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:39,145 DEBUG [Thread-1525 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5a259e93 to 127.0.0.1:60358 2024-11-12T19:34:39,145 DEBUG [Thread-1525 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:39,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:39,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440139229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:39,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:39,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:39,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440139239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:39,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440139239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:39,258 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:39,261 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111280d376c91bd5464a9c6129e0919a3a2e_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111280d376c91bd5464a9c6129e0919a3a2e_758f25fb434410405582dc106004e936 2024-11-12T19:34:39,262 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/35dd63642a8846f7ace9722e44cc634e, store: [table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:39,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/35dd63642a8846f7ace9722e44cc634e is 175, key is test_row_0/A:col10/1731440078846/Put/seqid=0 2024-11-12T19:34:39,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742241_1417 (size=39949) 2024-11-12T19:34:39,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:39,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440139531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:39,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:39,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440139541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:39,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:39,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440139542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:39,669 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=331, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/35dd63642a8846f7ace9722e44cc634e 2024-11-12T19:34:39,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/b7ef0dad80634346b452dbad5c97d907 is 50, key is test_row_0/B:col10/1731440078846/Put/seqid=0 2024-11-12T19:34:39,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742242_1418 (size=12301) 2024-11-12T19:34:39,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-12T19:34:39,822 INFO [Thread-1518 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-12T19:34:40,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:40,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36334 deadline: 1731440140036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:40,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:40,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36302 deadline: 1731440140044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:40,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:40,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36278 deadline: 1731440140045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:40,097 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/b7ef0dad80634346b452dbad5c97d907 2024-11-12T19:34:40,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/5c118242f2134c4b9f90a125b4346fa1 is 50, key is test_row_0/C:col10/1731440078846/Put/seqid=0 2024-11-12T19:34:40,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742243_1419 (size=12301) 2024-11-12T19:34:40,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/5c118242f2134c4b9f90a125b4346fa1 2024-11-12T19:34:40,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/35dd63642a8846f7ace9722e44cc634e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/35dd63642a8846f7ace9722e44cc634e 2024-11-12T19:34:40,522 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/35dd63642a8846f7ace9722e44cc634e, entries=200, sequenceid=331, filesize=39.0 K 2024-11-12T19:34:40,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/b7ef0dad80634346b452dbad5c97d907 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/b7ef0dad80634346b452dbad5c97d907 2024-11-12T19:34:40,528 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/b7ef0dad80634346b452dbad5c97d907, entries=150, sequenceid=331, filesize=12.0 K 2024-11-12T19:34:40,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/5c118242f2134c4b9f90a125b4346fa1 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/5c118242f2134c4b9f90a125b4346fa1 2024-11-12T19:34:40,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/5c118242f2134c4b9f90a125b4346fa1, entries=150, sequenceid=331, filesize=12.0 K 2024-11-12T19:34:40,536 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 758f25fb434410405582dc106004e936 in 1689ms, sequenceid=331, compaction requested=true 2024-11-12T19:34:40,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:40,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 758f25fb434410405582dc106004e936:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:34:40,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:40,537 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:40,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 758f25fb434410405582dc106004e936:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:34:40,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:40,537 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:40,537 DEBUG [MemStoreFlusher.0 
{}] regionserver.CompactSplit(403): Add compact mark for store 758f25fb434410405582dc106004e936:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:34:40,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:40,537 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:40,537 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:40,538 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 758f25fb434410405582dc106004e936/A is initiating minor compaction (all files) 2024-11-12T19:34:40,538 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 758f25fb434410405582dc106004e936/B is initiating minor compaction (all files) 2024-11-12T19:34:40,538 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 758f25fb434410405582dc106004e936/B in TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:40,538 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 758f25fb434410405582dc106004e936/A in TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:40,538 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/d64a525a64d2456a9b7bdab248961eb5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/a2f5fb497a3042c09761eff32757b9e9, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/35dd63642a8846f7ace9722e44cc634e] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp, totalSize=100.7 K 2024-11-12T19:34:40,538 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/12bcff7f816544848286219d72bad7cb, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/d139f242ab104e7db1e0a9062f211782, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/b7ef0dad80634346b452dbad5c97d907] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp, totalSize=36.7 K 2024-11-12T19:34:40,538 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] 
mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:40,538 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/d64a525a64d2456a9b7bdab248961eb5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/a2f5fb497a3042c09761eff32757b9e9, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/35dd63642a8846f7ace9722e44cc634e] 2024-11-12T19:34:40,538 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 12bcff7f816544848286219d72bad7cb, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1731440074370 2024-11-12T19:34:40,538 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting d64a525a64d2456a9b7bdab248961eb5, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1731440074370 2024-11-12T19:34:40,538 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting d139f242ab104e7db1e0a9062f211782, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1731440075551 2024-11-12T19:34:40,538 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2f5fb497a3042c09761eff32757b9e9, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1731440075551 2024-11-12T19:34:40,539 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting b7ef0dad80634346b452dbad5c97d907, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1731440076708 2024-11-12T19:34:40,539 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35dd63642a8846f7ace9722e44cc634e, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1731440076703 2024-11-12T19:34:40,545 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:40,546 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 758f25fb434410405582dc106004e936#B#compaction#348 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:40,546 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/6fd9afb826f14103a3759ac9d6b4c1e4 is 50, key is test_row_0/B:col10/1731440078846/Put/seqid=0 2024-11-12T19:34:40,548 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241112a0f71116fd444d2288be0fe2419dc4c2_758f25fb434410405582dc106004e936 store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:40,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742244_1420 (size=13051) 2024-11-12T19:34:40,557 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241112a0f71116fd444d2288be0fe2419dc4c2_758f25fb434410405582dc106004e936, store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:40,558 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112a0f71116fd444d2288be0fe2419dc4c2_758f25fb434410405582dc106004e936 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:40,562 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/6fd9afb826f14103a3759ac9d6b4c1e4 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/6fd9afb826f14103a3759ac9d6b4c1e4 2024-11-12T19:34:40,567 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 758f25fb434410405582dc106004e936/B of 758f25fb434410405582dc106004e936 into 6fd9afb826f14103a3759ac9d6b4c1e4(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:34:40,568 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:40,568 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., storeName=758f25fb434410405582dc106004e936/B, priority=13, startTime=1731440080537; duration=0sec 2024-11-12T19:34:40,568 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:40,568 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 758f25fb434410405582dc106004e936:B 2024-11-12T19:34:40,568 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:40,569 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:40,569 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 758f25fb434410405582dc106004e936/C is initiating minor compaction (all files) 2024-11-12T19:34:40,569 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 758f25fb434410405582dc106004e936/C in TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:40,569 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/ba5ab8e6f7a34906b451420a95a592a5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/da46612ebb2841e2a3d110eeedcedf08, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/5c118242f2134c4b9f90a125b4346fa1] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp, totalSize=36.7 K 2024-11-12T19:34:40,569 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting ba5ab8e6f7a34906b451420a95a592a5, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1731440074370 2024-11-12T19:34:40,570 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting da46612ebb2841e2a3d110eeedcedf08, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1731440075551 2024-11-12T19:34:40,570 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c118242f2134c4b9f90a125b4346fa1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1731440076708 2024-11-12T19:34:40,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 
is added to blk_1073742245_1421 (size=4469) 2024-11-12T19:34:40,587 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 758f25fb434410405582dc106004e936#C#compaction#350 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:40,588 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/2f0c5104d63145fa9be00b2b6248804b is 50, key is test_row_0/C:col10/1731440078846/Put/seqid=0 2024-11-12T19:34:40,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742246_1422 (size=13051) 2024-11-12T19:34:40,943 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/bf47fea3631a4e369c29e92e0c768738, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ebc97ad9384249ea8bdb2952435adb5c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/78be46fc33f140a7a82ec894fffe6af5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/adfef077ac7a4626a71982a735c1f9ef, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/708ce42716c047709ecffcab670a9ef3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/d8aff2c4a87a45a6b126c9ceb616d202, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/024893887e8243c59727d136638cff28, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/7fadfeb0e3284a0584eacd05cbffff12, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ff46bc424f16482491a1984636b3f3fe, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/63a8c9a44d4848bbb38c7499805edb12, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/9676030831264813b9cbecf025682980, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/0a4c42402e804d99bd0d20ee99033604, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/a6ab8f0a6a7641469ac993c68c539235, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ebae37f42d2843f985a2378f353ed16c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/cf2b2672b7b245c29898a5298bdc685a, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/23514fe3ea1f442690e78ac67a404db6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/9cda07c5f123448791c4d951d630d7ab, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/38ccc943be334bd494b4718768842125, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/b52201543e4f432b9beb4bec4d39b87b] to archive 2024-11-12T19:34:40,944 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-12T19:34:40,946 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/bf47fea3631a4e369c29e92e0c768738 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/bf47fea3631a4e369c29e92e0c768738 2024-11-12T19:34:40,948 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ebc97ad9384249ea8bdb2952435adb5c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ebc97ad9384249ea8bdb2952435adb5c 2024-11-12T19:34:40,949 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/78be46fc33f140a7a82ec894fffe6af5 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/78be46fc33f140a7a82ec894fffe6af5 2024-11-12T19:34:40,950 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/adfef077ac7a4626a71982a735c1f9ef to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/adfef077ac7a4626a71982a735c1f9ef 2024-11-12T19:34:40,952 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/708ce42716c047709ecffcab670a9ef3 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/708ce42716c047709ecffcab670a9ef3 2024-11-12T19:34:40,953 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/d8aff2c4a87a45a6b126c9ceb616d202 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/d8aff2c4a87a45a6b126c9ceb616d202 2024-11-12T19:34:40,955 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/024893887e8243c59727d136638cff28 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/024893887e8243c59727d136638cff28 2024-11-12T19:34:40,956 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/7fadfeb0e3284a0584eacd05cbffff12 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/7fadfeb0e3284a0584eacd05cbffff12 2024-11-12T19:34:40,957 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ff46bc424f16482491a1984636b3f3fe to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ff46bc424f16482491a1984636b3f3fe 2024-11-12T19:34:40,959 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/63a8c9a44d4848bbb38c7499805edb12 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/63a8c9a44d4848bbb38c7499805edb12 2024-11-12T19:34:40,960 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/9676030831264813b9cbecf025682980 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/9676030831264813b9cbecf025682980 2024-11-12T19:34:40,962 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/0a4c42402e804d99bd0d20ee99033604 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/0a4c42402e804d99bd0d20ee99033604 2024-11-12T19:34:40,963 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/a6ab8f0a6a7641469ac993c68c539235 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/a6ab8f0a6a7641469ac993c68c539235 2024-11-12T19:34:40,964 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ebae37f42d2843f985a2378f353ed16c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/ebae37f42d2843f985a2378f353ed16c 2024-11-12T19:34:40,966 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/cf2b2672b7b245c29898a5298bdc685a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/cf2b2672b7b245c29898a5298bdc685a 2024-11-12T19:34:40,967 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/23514fe3ea1f442690e78ac67a404db6 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/23514fe3ea1f442690e78ac67a404db6 2024-11-12T19:34:40,968 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/9cda07c5f123448791c4d951d630d7ab to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/9cda07c5f123448791c4d951d630d7ab 2024-11-12T19:34:40,970 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/38ccc943be334bd494b4718768842125 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/38ccc943be334bd494b4718768842125 2024-11-12T19:34:40,971 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/b52201543e4f432b9beb4bec4d39b87b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/b52201543e4f432b9beb4bec4d39b87b 2024-11-12T19:34:40,975 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/92e2731190e04c2ab796458b59436c73, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/111f4ff521b34517bc554c0726866a8b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/1eb425dd9a77489cbee7e5c9792d11ea, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/a17fe2454a264388b6e78929f782d608, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/ce523650f457449ab17d8c8de60da30d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/cc98873a3493454b9993521aa6c2e77b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/47c8597c43c3481abb2d058b9f71272c, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/0f40d7ca193b4e50b2467da8f3ef9dfd, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/0281444efbb342c58a1c70e5120b4b76, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/6957a8b0bf5448fb97616c17369ce48d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/e028b01e5e464b3c81de0136b011940d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/f735ec199df54240931ea7baedc7c0e5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/86e65b910e314960b6617290c5508b64, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/21ac70411b554fcf82c5a50af16f1263, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/6c9d020516194f938fa43989425c7fb4, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/a1bd5ac268d748f3b2a849362b1a29e6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/121d255d03a04cd496d6bdabaaf11e4b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/56d7b441bdb54d97898e9266e121a9ad, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/12bcff7f816544848286219d72bad7cb, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/e43d1e67d8fc43868bb6376da6d99294, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/d139f242ab104e7db1e0a9062f211782, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/b7ef0dad80634346b452dbad5c97d907] to archive 2024-11-12T19:34:40,976 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-12T19:34:40,978 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/92e2731190e04c2ab796458b59436c73 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/92e2731190e04c2ab796458b59436c73 2024-11-12T19:34:40,979 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/111f4ff521b34517bc554c0726866a8b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/111f4ff521b34517bc554c0726866a8b 2024-11-12T19:34:40,980 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/1eb425dd9a77489cbee7e5c9792d11ea to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/1eb425dd9a77489cbee7e5c9792d11ea 2024-11-12T19:34:40,981 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 758f25fb434410405582dc106004e936#A#compaction#349 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:40,981 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/93e9b251c1d3446398143c1af2636089 is 175, key is test_row_0/A:col10/1731440078846/Put/seqid=0 2024-11-12T19:34:40,982 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/a17fe2454a264388b6e78929f782d608 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/a17fe2454a264388b6e78929f782d608 2024-11-12T19:34:40,983 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/ce523650f457449ab17d8c8de60da30d to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/ce523650f457449ab17d8c8de60da30d 2024-11-12T19:34:40,984 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/cc98873a3493454b9993521aa6c2e77b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/cc98873a3493454b9993521aa6c2e77b 2024-11-12T19:34:40,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742247_1423 (size=31358) 2024-11-12T19:34:40,985 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/47c8597c43c3481abb2d058b9f71272c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/47c8597c43c3481abb2d058b9f71272c 2024-11-12T19:34:40,986 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/0f40d7ca193b4e50b2467da8f3ef9dfd to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/0f40d7ca193b4e50b2467da8f3ef9dfd 2024-11-12T19:34:40,988 DEBUG 
[RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/0281444efbb342c58a1c70e5120b4b76 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/0281444efbb342c58a1c70e5120b4b76 2024-11-12T19:34:40,989 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/6957a8b0bf5448fb97616c17369ce48d to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/6957a8b0bf5448fb97616c17369ce48d 2024-11-12T19:34:40,990 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/e028b01e5e464b3c81de0136b011940d to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/e028b01e5e464b3c81de0136b011940d 2024-11-12T19:34:40,991 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/f735ec199df54240931ea7baedc7c0e5 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/f735ec199df54240931ea7baedc7c0e5 2024-11-12T19:34:40,992 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/86e65b910e314960b6617290c5508b64 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/86e65b910e314960b6617290c5508b64 2024-11-12T19:34:40,994 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/21ac70411b554fcf82c5a50af16f1263 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/21ac70411b554fcf82c5a50af16f1263 2024-11-12T19:34:40,995 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/6c9d020516194f938fa43989425c7fb4 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/6c9d020516194f938fa43989425c7fb4 2024-11-12T19:34:40,996 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/2f0c5104d63145fa9be00b2b6248804b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/2f0c5104d63145fa9be00b2b6248804b 2024-11-12T19:34:40,996 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/a1bd5ac268d748f3b2a849362b1a29e6 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/a1bd5ac268d748f3b2a849362b1a29e6 2024-11-12T19:34:40,997 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/121d255d03a04cd496d6bdabaaf11e4b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/121d255d03a04cd496d6bdabaaf11e4b 2024-11-12T19:34:40,999 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/56d7b441bdb54d97898e9266e121a9ad to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/56d7b441bdb54d97898e9266e121a9ad 2024-11-12T19:34:41,000 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/12bcff7f816544848286219d72bad7cb to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/12bcff7f816544848286219d72bad7cb 2024-11-12T19:34:41,001 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 758f25fb434410405582dc106004e936/C of 758f25fb434410405582dc106004e936 into 2f0c5104d63145fa9be00b2b6248804b(size=12.7 K), total size for store is 12.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:34:41,001 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:41,001 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., storeName=758f25fb434410405582dc106004e936/C, priority=13, startTime=1731440080537; duration=0sec 2024-11-12T19:34:41,001 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:41,001 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 758f25fb434410405582dc106004e936:C 2024-11-12T19:34:41,001 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/e43d1e67d8fc43868bb6376da6d99294 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/e43d1e67d8fc43868bb6376da6d99294 2024-11-12T19:34:41,002 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/d139f242ab104e7db1e0a9062f211782 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/d139f242ab104e7db1e0a9062f211782 2024-11-12T19:34:41,003 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/b7ef0dad80634346b452dbad5c97d907 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/b7ef0dad80634346b452dbad5c97d907 2024-11-12T19:34:41,006 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/d477e747894e4ed1bc7e0bfeaabe2fac, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/c2f347d1c60b43fbaf18174938e1cae9, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/9c1ae73ea6d647899e3b215616fd81d8, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/0da3c2a0ea5d477f89d8a860b69d4168, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/788d9415f58b4c439b7b5d6f2dfecc82, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/b8234d06264d41e7ac445c4d2e1420a5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/c3b693f4d7044905a2fd190260cb6f84, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/1b04518687ac4d06902afb2011197e85, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/e0c065022f6e4bfcb93b3c3708a71004, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/f03c0423a8cd4fe89404fa68261748bd, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/3df8f9a66f2443d6a07074b8a6291df3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/50f1ad82d1544b23866fcb65a3abd186, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/3abd8b248f224d0c9df5ae55e9104b68, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/470c90be92494757b30097eae7fee8c3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/24e53b68365e464ca1a2160243587af4, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/301be698710f4cefaf017f414880807c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/c0ba55af17d14d13b6065dd934ee9838, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/0935a0b1c03b45789c14743253f9f7e8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/ba5ab8e6f7a34906b451420a95a592a5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/6ae3c91fb6fd4d19bc82efe30bba8f14, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/da46612ebb2841e2a3d110eeedcedf08, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/5c118242f2134c4b9f90a125b4346fa1] to archive 2024-11-12T19:34:41,007 DEBUG 
[RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-12T19:34:41,008 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/d477e747894e4ed1bc7e0bfeaabe2fac to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/d477e747894e4ed1bc7e0bfeaabe2fac 2024-11-12T19:34:41,009 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/c2f347d1c60b43fbaf18174938e1cae9 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/c2f347d1c60b43fbaf18174938e1cae9 2024-11-12T19:34:41,010 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/9c1ae73ea6d647899e3b215616fd81d8 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/9c1ae73ea6d647899e3b215616fd81d8 2024-11-12T19:34:41,011 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/0da3c2a0ea5d477f89d8a860b69d4168 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/0da3c2a0ea5d477f89d8a860b69d4168 2024-11-12T19:34:41,012 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/788d9415f58b4c439b7b5d6f2dfecc82 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/788d9415f58b4c439b7b5d6f2dfecc82 2024-11-12T19:34:41,013 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/b8234d06264d41e7ac445c4d2e1420a5 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/b8234d06264d41e7ac445c4d2e1420a5 2024-11-12T19:34:41,014 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/c3b693f4d7044905a2fd190260cb6f84 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/c3b693f4d7044905a2fd190260cb6f84 2024-11-12T19:34:41,015 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/1b04518687ac4d06902afb2011197e85 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/1b04518687ac4d06902afb2011197e85 2024-11-12T19:34:41,016 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/e0c065022f6e4bfcb93b3c3708a71004 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/e0c065022f6e4bfcb93b3c3708a71004 2024-11-12T19:34:41,017 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/f03c0423a8cd4fe89404fa68261748bd to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/f03c0423a8cd4fe89404fa68261748bd 2024-11-12T19:34:41,018 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/3df8f9a66f2443d6a07074b8a6291df3 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/3df8f9a66f2443d6a07074b8a6291df3 2024-11-12T19:34:41,019 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/50f1ad82d1544b23866fcb65a3abd186 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/50f1ad82d1544b23866fcb65a3abd186 2024-11-12T19:34:41,020 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/3abd8b248f224d0c9df5ae55e9104b68 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/3abd8b248f224d0c9df5ae55e9104b68 2024-11-12T19:34:41,022 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/470c90be92494757b30097eae7fee8c3 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/470c90be92494757b30097eae7fee8c3 2024-11-12T19:34:41,023 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/24e53b68365e464ca1a2160243587af4 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/24e53b68365e464ca1a2160243587af4 2024-11-12T19:34:41,023 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/301be698710f4cefaf017f414880807c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/301be698710f4cefaf017f414880807c 2024-11-12T19:34:41,024 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/c0ba55af17d14d13b6065dd934ee9838 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/c0ba55af17d14d13b6065dd934ee9838 2024-11-12T19:34:41,025 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/0935a0b1c03b45789c14743253f9f7e8 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/0935a0b1c03b45789c14743253f9f7e8 2024-11-12T19:34:41,026 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/ba5ab8e6f7a34906b451420a95a592a5 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/ba5ab8e6f7a34906b451420a95a592a5 2024-11-12T19:34:41,026 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/6ae3c91fb6fd4d19bc82efe30bba8f14 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/6ae3c91fb6fd4d19bc82efe30bba8f14 2024-11-12T19:34:41,027 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/da46612ebb2841e2a3d110eeedcedf08 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/da46612ebb2841e2a3d110eeedcedf08 2024-11-12T19:34:41,028 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/81d69e608036:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/5c118242f2134c4b9f90a125b4346fa1 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/5c118242f2134c4b9f90a125b4346fa1 2024-11-12T19:34:41,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 758f25fb434410405582dc106004e936 2024-11-12T19:34:41,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 758f25fb434410405582dc106004e936 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-12T19:34:41,046 DEBUG [Thread-1514 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x789089aa to 127.0.0.1:60358 2024-11-12T19:34:41,046 DEBUG [Thread-1514 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:41,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=A 2024-11-12T19:34:41,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:41,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=B 2024-11-12T19:34:41,047 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:41,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=C 2024-11-12T19:34:41,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:41,048 DEBUG [Thread-1516 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x699c96a7 to 127.0.0.1:60358 2024-11-12T19:34:41,048 DEBUG [Thread-1516 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:41,053 DEBUG [Thread-1510 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2076b3ad to 127.0.0.1:60358 2024-11-12T19:34:41,053 DEBUG [Thread-1510 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:41,053 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112d95da66385b04467a06dcad13ef50bd8_758f25fb434410405582dc106004e936 is 50, key is test_row_0/A:col10/1731440078917/Put/seqid=0 2024-11-12T19:34:41,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742248_1424 (size=12454) 2024-11-12T19:34:41,390 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/93e9b251c1d3446398143c1af2636089 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/93e9b251c1d3446398143c1af2636089 2024-11-12T19:34:41,395 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 758f25fb434410405582dc106004e936/A of 758f25fb434410405582dc106004e936 into 93e9b251c1d3446398143c1af2636089(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:34:41,396 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:41,396 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936., storeName=758f25fb434410405582dc106004e936/A, priority=13, startTime=1731440080536; duration=0sec 2024-11-12T19:34:41,396 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:41,396 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 758f25fb434410405582dc106004e936:A 2024-11-12T19:34:41,458 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:41,464 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112d95da66385b04467a06dcad13ef50bd8_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112d95da66385b04467a06dcad13ef50bd8_758f25fb434410405582dc106004e936 2024-11-12T19:34:41,465 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/43911b0995d9444b8ea8cc652f79b729, store: [table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:41,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/43911b0995d9444b8ea8cc652f79b729 is 175, key is test_row_0/A:col10/1731440078917/Put/seqid=0 2024-11-12T19:34:41,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742249_1425 (size=31255) 2024-11-12T19:34:41,871 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=356, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/43911b0995d9444b8ea8cc652f79b729 2024-11-12T19:34:41,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/bc0d6d4a99e64328bcb6a292b2a04619 is 50, key is test_row_0/B:col10/1731440078917/Put/seqid=0 2024-11-12T19:34:41,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742250_1426 (size=12301) 2024-11-12T19:34:42,284 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/bc0d6d4a99e64328bcb6a292b2a04619 2024-11-12T19:34:42,291 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/bad045ba38c94da0af9490c07bc5ae1a is 50, key is test_row_0/C:col10/1731440078917/Put/seqid=0 2024-11-12T19:34:42,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742251_1427 (size=12301) 2024-11-12T19:34:42,697 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/bad045ba38c94da0af9490c07bc5ae1a 2024-11-12T19:34:42,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/43911b0995d9444b8ea8cc652f79b729 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/43911b0995d9444b8ea8cc652f79b729 2024-11-12T19:34:42,711 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/43911b0995d9444b8ea8cc652f79b729, entries=150, sequenceid=356, filesize=30.5 K 2024-11-12T19:34:42,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/bc0d6d4a99e64328bcb6a292b2a04619 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/bc0d6d4a99e64328bcb6a292b2a04619 2024-11-12T19:34:42,717 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/bc0d6d4a99e64328bcb6a292b2a04619, entries=150, sequenceid=356, filesize=12.0 K 2024-11-12T19:34:42,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/bad045ba38c94da0af9490c07bc5ae1a as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/bad045ba38c94da0af9490c07bc5ae1a 2024-11-12T19:34:42,721 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/bad045ba38c94da0af9490c07bc5ae1a, entries=150, sequenceid=356, filesize=12.0 K 2024-11-12T19:34:42,722 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=13.42 KB/13740 for 758f25fb434410405582dc106004e936 in 1676ms, sequenceid=356, compaction requested=false 2024-11-12T19:34:42,722 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:44,067 ERROR [LeaseRenewer:jenkins.hfs.0@localhost:41367 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins.hfs.0@localhost:41367,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:45,890 DEBUG [Thread-1512 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x20c5edec to 127.0.0.1:60358 2024-11-12T19:34:45,890 DEBUG [Thread-1512 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:45,896 ERROR [LeaseRenewer:jenkins@localhost:41367 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins@localhost:41367,5,PEWorkerGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:45,936 DEBUG [Thread-1508 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2953086d to 127.0.0.1:60358 2024-11-12T19:34:45,936 DEBUG [Thread-1508 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:45,936 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-12T19:34:45,936 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 37 2024-11-12T19:34:45,936 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 68 2024-11-12T19:34:45,936 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 40 2024-11-12T19:34:45,936 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 68 2024-11-12T19:34:45,936 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 72 2024-11-12T19:34:45,936 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-12T19:34:45,936 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-12T19:34:45,937 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1599 2024-11-12T19:34:45,937 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4793 rows 2024-11-12T19:34:45,937 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1607 2024-11-12T19:34:45,937 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4820 rows 2024-11-12T19:34:45,937 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1586 2024-11-12T19:34:45,937 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4757 rows 2024-11-12T19:34:45,937 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1589 2024-11-12T19:34:45,937 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4765 rows 2024-11-12T19:34:45,937 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1604 2024-11-12T19:34:45,937 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4811 rows 2024-11-12T19:34:45,937 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-12T19:34:45,937 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x474d5947 to 127.0.0.1:60358 2024-11-12T19:34:45,937 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:34:45,939 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-12T19:34:45,940 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees 2024-11-12T19:34:45,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-12T19:34:45,944 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731440085943"}]},"ts":"1731440085943"} 2024-11-12T19:34:45,945 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-12T19:34:45,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-12T19:34:45,972 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-12T19:34:45,973 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-12T19:34:45,975 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=112, ppid=111, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=758f25fb434410405582dc106004e936, UNASSIGN}] 2024-11-12T19:34:45,975 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=112, ppid=111, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=758f25fb434410405582dc106004e936, UNASSIGN 2024-11-12T19:34:45,976 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=758f25fb434410405582dc106004e936, regionState=CLOSING, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:34:45,977 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-12T19:34:45,977 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; CloseRegionProcedure 758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493}] 2024-11-12T19:34:46,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-12T19:34:46,129 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:46,130 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(124): Close 758f25fb434410405582dc106004e936 2024-11-12T19:34:46,130 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-12T19:34:46,130 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1681): Closing 758f25fb434410405582dc106004e936, disabling compactions & flushes 2024-11-12T19:34:46,130 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:46,130 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 2024-11-12T19:34:46,130 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. after waiting 0 ms 2024-11-12T19:34:46,130 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:46,130 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(2837): Flushing 758f25fb434410405582dc106004e936 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-12T19:34:46,130 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=A 2024-11-12T19:34:46,130 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:46,130 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=B 2024-11-12T19:34:46,131 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:46,131 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 758f25fb434410405582dc106004e936, store=C 2024-11-12T19:34:46,131 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:46,146 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411127d0e1363fe034a149dfdac322fc21a7e_758f25fb434410405582dc106004e936 is 50, key is test_row_0/A:col10/1731440081052/Put/seqid=0 2024-11-12T19:34:46,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742252_1428 (size=9914) 2024-11-12T19:34:46,156 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:46,160 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411127d0e1363fe034a149dfdac322fc21a7e_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411127d0e1363fe034a149dfdac322fc21a7e_758f25fb434410405582dc106004e936 2024-11-12T19:34:46,161 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/f6175b8af03a48268abbcd1bbb54e103, store: [table=TestAcidGuarantees family=A region=758f25fb434410405582dc106004e936] 2024-11-12T19:34:46,161 DEBUG 
[RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/f6175b8af03a48268abbcd1bbb54e103 is 175, key is test_row_0/A:col10/1731440081052/Put/seqid=0 2024-11-12T19:34:46,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742253_1429 (size=22561) 2024-11-12T19:34:46,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-12T19:34:46,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-12T19:34:46,565 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=364, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/f6175b8af03a48268abbcd1bbb54e103 2024-11-12T19:34:46,571 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/da1e06d8d46c40839c0b76ab4061b2b6 is 50, key is test_row_0/B:col10/1731440081052/Put/seqid=0 2024-11-12T19:34:46,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742254_1430 (size=9857) 2024-11-12T19:34:46,977 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/da1e06d8d46c40839c0b76ab4061b2b6 2024-11-12T19:34:46,984 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/d6f8a42528e147e7aa0165fc97c43b11 is 50, key is test_row_0/C:col10/1731440081052/Put/seqid=0 2024-11-12T19:34:46,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742255_1431 (size=9857) 2024-11-12T19:34:47,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-12T19:34:47,392 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/d6f8a42528e147e7aa0165fc97c43b11 2024-11-12T19:34:47,395 
DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/A/f6175b8af03a48268abbcd1bbb54e103 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/f6175b8af03a48268abbcd1bbb54e103 2024-11-12T19:34:47,399 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/f6175b8af03a48268abbcd1bbb54e103, entries=100, sequenceid=364, filesize=22.0 K 2024-11-12T19:34:47,399 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/B/da1e06d8d46c40839c0b76ab4061b2b6 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/da1e06d8d46c40839c0b76ab4061b2b6 2024-11-12T19:34:47,402 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/da1e06d8d46c40839c0b76ab4061b2b6, entries=100, sequenceid=364, filesize=9.6 K 2024-11-12T19:34:47,403 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/.tmp/C/d6f8a42528e147e7aa0165fc97c43b11 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/d6f8a42528e147e7aa0165fc97c43b11 2024-11-12T19:34:47,406 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/d6f8a42528e147e7aa0165fc97c43b11, entries=100, sequenceid=364, filesize=9.6 K 2024-11-12T19:34:47,406 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 758f25fb434410405582dc106004e936 in 1276ms, sequenceid=364, compaction requested=true 2024-11-12T19:34:47,407 DEBUG [StoreCloser-TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/d64a525a64d2456a9b7bdab248961eb5, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/a2f5fb497a3042c09761eff32757b9e9, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/35dd63642a8846f7ace9722e44cc634e] to archive 2024-11-12T19:34:47,407 DEBUG [StoreCloser-TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-12T19:34:47,409 DEBUG [StoreCloser-TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/d64a525a64d2456a9b7bdab248961eb5 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/d64a525a64d2456a9b7bdab248961eb5 2024-11-12T19:34:47,410 DEBUG [StoreCloser-TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/a2f5fb497a3042c09761eff32757b9e9 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/a2f5fb497a3042c09761eff32757b9e9 2024-11-12T19:34:47,410 DEBUG [StoreCloser-TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/35dd63642a8846f7ace9722e44cc634e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/35dd63642a8846f7ace9722e44cc634e 2024-11-12T19:34:47,415 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/recovered.edits/367.seqid, newMaxSeqId=367, maxSeqId=4 2024-11-12T19:34:47,415 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936. 
2024-11-12T19:34:47,415 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1635): Region close journal for 758f25fb434410405582dc106004e936: 2024-11-12T19:34:47,417 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(170): Closed 758f25fb434410405582dc106004e936 2024-11-12T19:34:47,417 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=758f25fb434410405582dc106004e936, regionState=CLOSED 2024-11-12T19:34:47,419 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-12T19:34:47,419 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; CloseRegionProcedure 758f25fb434410405582dc106004e936, server=81d69e608036,33067,1731439956493 in 1.4410 sec 2024-11-12T19:34:47,420 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=112, resume processing ppid=111 2024-11-12T19:34:47,420 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, ppid=111, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=758f25fb434410405582dc106004e936, UNASSIGN in 1.4440 sec 2024-11-12T19:34:47,421 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-12T19:34:47,421 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4470 sec 2024-11-12T19:34:47,422 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731440087422"}]},"ts":"1731440087422"} 2024-11-12T19:34:47,423 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-12T19:34:47,744 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-12T19:34:47,745 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8040 sec 2024-11-12T19:34:48,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-12T19:34:48,052 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-12T19:34:48,053 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-11-12T19:34:48,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:34:48,054 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=114, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:34:48,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-12T19:34:48,055 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=114, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:34:48,057 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936 2024-11-12T19:34:48,059 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A, FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B, FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C, FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/recovered.edits] 2024-11-12T19:34:48,063 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/43911b0995d9444b8ea8cc652f79b729 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/43911b0995d9444b8ea8cc652f79b729 2024-11-12T19:34:48,064 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/93e9b251c1d3446398143c1af2636089 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/93e9b251c1d3446398143c1af2636089 2024-11-12T19:34:48,065 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/f6175b8af03a48268abbcd1bbb54e103 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/A/f6175b8af03a48268abbcd1bbb54e103 2024-11-12T19:34:48,068 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/6fd9afb826f14103a3759ac9d6b4c1e4 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/6fd9afb826f14103a3759ac9d6b4c1e4 2024-11-12T19:34:48,069 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/bc0d6d4a99e64328bcb6a292b2a04619 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/bc0d6d4a99e64328bcb6a292b2a04619 
2024-11-12T19:34:48,070 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/da1e06d8d46c40839c0b76ab4061b2b6 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/B/da1e06d8d46c40839c0b76ab4061b2b6 2024-11-12T19:34:48,076 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/2f0c5104d63145fa9be00b2b6248804b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/2f0c5104d63145fa9be00b2b6248804b 2024-11-12T19:34:48,077 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/bad045ba38c94da0af9490c07bc5ae1a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/bad045ba38c94da0af9490c07bc5ae1a 2024-11-12T19:34:48,079 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/d6f8a42528e147e7aa0165fc97c43b11 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/C/d6f8a42528e147e7aa0165fc97c43b11 2024-11-12T19:34:48,085 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/recovered.edits/367.seqid to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936/recovered.edits/367.seqid 2024-11-12T19:34:48,087 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/758f25fb434410405582dc106004e936 2024-11-12T19:34:48,087 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-12T19:34:48,088 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-12T19:34:48,089 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-12T19:34:48,092 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411120287234240d44ccfba76ab8954665d37_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411120287234240d44ccfba76ab8954665d37_758f25fb434410405582dc106004e936 2024-11-12T19:34:48,093 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411120401cda48df246aa9022e0aa97d16a18_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411120401cda48df246aa9022e0aa97d16a18_758f25fb434410405582dc106004e936 2024-11-12T19:34:48,095 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411120560d26bd8a7416dbe193508501e7511_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411120560d26bd8a7416dbe193508501e7511_758f25fb434410405582dc106004e936 2024-11-12T19:34:48,098 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111212fdb01409c644009daa8843123b0c61_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111212fdb01409c644009daa8843123b0c61_758f25fb434410405582dc106004e936 2024-11-12T19:34:48,100 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411125f7c6a1e3e1340fb931f2ffdacc0bc28_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411125f7c6a1e3e1340fb931f2ffdacc0bc28_758f25fb434410405582dc106004e936 2024-11-12T19:34:48,101 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411126491fe25944f407a9d34912580e2e00e_758f25fb434410405582dc106004e936 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411126491fe25944f407a9d34912580e2e00e_758f25fb434410405582dc106004e936 2024-11-12T19:34:48,102 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411127bdc7a80abf04aab9aad086b80b3f9ef_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411127bdc7a80abf04aab9aad086b80b3f9ef_758f25fb434410405582dc106004e936 2024-11-12T19:34:48,103 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411127d0e1363fe034a149dfdac322fc21a7e_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411127d0e1363fe034a149dfdac322fc21a7e_758f25fb434410405582dc106004e936 2024-11-12T19:34:48,103 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111280d376c91bd5464a9c6129e0919a3a2e_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111280d376c91bd5464a9c6129e0919a3a2e_758f25fb434410405582dc106004e936 2024-11-12T19:34:48,105 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111283c1a28e3de94d12b4acdb1e5604fb4d_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111283c1a28e3de94d12b4acdb1e5604fb4d_758f25fb434410405582dc106004e936 2024-11-12T19:34:48,106 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411128a496a79a3cd41c88bedbcb79e15b553_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411128a496a79a3cd41c88bedbcb79e15b553_758f25fb434410405582dc106004e936 2024-11-12T19:34:48,107 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112a65746bb27fd40ba9cadfddff8d2ce9f_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112a65746bb27fd40ba9cadfddff8d2ce9f_758f25fb434410405582dc106004e936 2024-11-12T19:34:48,108 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112a96ff591a26a4ba2a3c69dfd26c9383a_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112a96ff591a26a4ba2a3c69dfd26c9383a_758f25fb434410405582dc106004e936 2024-11-12T19:34:48,109 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112d95da66385b04467a06dcad13ef50bd8_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112d95da66385b04467a06dcad13ef50bd8_758f25fb434410405582dc106004e936 2024-11-12T19:34:48,109 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112e0a6eb72f035473f800918c81d9a7ec6_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112e0a6eb72f035473f800918c81d9a7ec6_758f25fb434410405582dc106004e936 2024-11-12T19:34:48,110 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112e34f63b521504362bbdeabe3ce1f68c6_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112e34f63b521504362bbdeabe3ce1f68c6_758f25fb434410405582dc106004e936 2024-11-12T19:34:48,111 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112ea4af62b27024dbb907273a81cf56982_758f25fb434410405582dc106004e936 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112ea4af62b27024dbb907273a81cf56982_758f25fb434410405582dc106004e936 2024-11-12T19:34:48,113 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112ec47212587ad4741ba6243058406929f_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112ec47212587ad4741ba6243058406929f_758f25fb434410405582dc106004e936 2024-11-12T19:34:48,115 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112f5dd831204ae4550bc9613782a2ad141_758f25fb434410405582dc106004e936 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112f5dd831204ae4550bc9613782a2ad141_758f25fb434410405582dc106004e936 2024-11-12T19:34:48,115 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-12T19:34:48,117 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=114, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:34:48,119 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-12T19:34:48,122 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-12T19:34:48,123 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=114, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:34:48,123 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-12T19:34:48,123 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731440088123"}]},"ts":"9223372036854775807"} 2024-11-12T19:34:48,125 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-12T19:34:48,125 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 758f25fb434410405582dc106004e936, NAME => 'TestAcidGuarantees,,1731440055886.758f25fb434410405582dc106004e936.', STARTKEY => '', ENDKEY => ''}] 2024-11-12T19:34:48,126 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-12T19:34:48,126 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731440088126"}]},"ts":"9223372036854775807"} 2024-11-12T19:34:48,128 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-12T19:34:48,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-12T19:34:48,158 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=114, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:34:48,159 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 105 msec 2024-11-12T19:34:48,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-12T19:34:48,357 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-12T19:34:48,367 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=235 (was 235), OpenFileDescriptor=451 (was 447) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1109 (was 1187), ProcessCount=11 (was 11), AvailableMemoryMB=1350 (was 1033) - AvailableMemoryMB LEAK? - 2024-11-12T19:34:48,379 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=235, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=1109, ProcessCount=11, AvailableMemoryMB=1349 2024-11-12T19:34:48,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
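The DISABLE (pid=110) and DELETE (pid=114) procedures recorded above are the master-side work behind two ordinary Admin calls from the test client (jenkins//172.17.0.3). A minimal sketch of that teardown, assuming the stock HBase 2.x client API; configuration lookup and error handling are simplified:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTable {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        // HBaseConfiguration.create() picks up hbase-site.xml from the classpath.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          if (admin.tableExists(table)) {
            if (admin.isTableEnabled(table)) {
              admin.disableTable(table);  // master runs a DisableTableProcedure (pid=110 above)
            }
            admin.deleteTable(table);     // master runs a DeleteTableProcedure (pid=114 above)
          }
        }
      }
    }

The HBaseAdmin$TableFuture(3751) lines in the log are the client side of the same calls waiting for procedure completion ("Operation: DISABLE ... completed", "Operation: DELETE ... completed").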
2024-11-12T19:34:48,380 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T19:34:48,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=115, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-12T19:34:48,382 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T19:34:48,382 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:48,382 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 115 2024-11-12T19:34:48,383 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T19:34:48,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-11-12T19:34:48,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742256_1432 (size=960) 2024-11-12T19:34:48,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-11-12T19:34:48,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-11-12T19:34:48,792 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8 2024-11-12T19:34:48,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742257_1433 (size=53) 2024-11-12T19:34:48,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-11-12T19:34:49,220 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:34:49,220 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing ce8b2c342e0c55d57c9696ce6e06a527, disabling compactions & flushes 2024-11-12T19:34:49,220 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:49,220 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:49,220 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. after waiting 0 ms 2024-11-12T19:34:49,220 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:49,220 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
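The create request above spells out the full table descriptor: three column families A, B and C with VERSIONS => '1', plus the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC' that selects the CompactingMemStore seen when the region opens. A minimal sketch of building and submitting an equivalent descriptor with the standard HBase 2.x client API (only attributes visible in the log are set explicitly; everything else keeps its default):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        TableDescriptorBuilder tdb = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table-level metadata from the log: BASIC in-memory compaction.
            .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
        for (String family : new String[] {"A", "B", "C"}) {
          tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)       // VERSIONS => '1'
              .setBlocksize(65536)     // BLOCKSIZE => '65536'
              .build());
        }
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(tdb.build());  // drives the CreateTableProcedure (pid=115 above)
        }
      }
    }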
2024-11-12T19:34:49,220 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:49,221 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T19:34:49,221 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1731440089221"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731440089221"}]},"ts":"1731440089221"} 2024-11-12T19:34:49,222 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-12T19:34:49,223 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T19:34:49,223 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731440089223"}]},"ts":"1731440089223"} 2024-11-12T19:34:49,223 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-12T19:34:49,270 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ce8b2c342e0c55d57c9696ce6e06a527, ASSIGN}] 2024-11-12T19:34:49,271 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ce8b2c342e0c55d57c9696ce6e06a527, ASSIGN 2024-11-12T19:34:49,271 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=ce8b2c342e0c55d57c9696ce6e06a527, ASSIGN; state=OFFLINE, location=81d69e608036,33067,1731439956493; forceNewPlan=false, retain=false 2024-11-12T19:34:49,422 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=ce8b2c342e0c55d57c9696ce6e06a527, regionState=OPENING, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:34:49,423 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; OpenRegionProcedure ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493}] 2024-11-12T19:34:49,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-11-12T19:34:49,575 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:49,578 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:34:49,578 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7285): Opening region: {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} 2024-11-12T19:34:49,579 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:34:49,579 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:34:49,579 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7327): checking encryption for ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:34:49,579 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7330): checking classloading for ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:34:49,580 INFO [StoreOpener-ce8b2c342e0c55d57c9696ce6e06a527-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:34:49,581 INFO [StoreOpener-ce8b2c342e0c55d57c9696ce6e06a527-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:34:49,581 INFO [StoreOpener-ce8b2c342e0c55d57c9696ce6e06a527-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ce8b2c342e0c55d57c9696ce6e06a527 columnFamilyName A 2024-11-12T19:34:49,581 DEBUG [StoreOpener-ce8b2c342e0c55d57c9696ce6e06a527-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:49,583 INFO [StoreOpener-ce8b2c342e0c55d57c9696ce6e06a527-1 {}] regionserver.HStore(327): Store=ce8b2c342e0c55d57c9696ce6e06a527/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:34:49,583 INFO [StoreOpener-ce8b2c342e0c55d57c9696ce6e06a527-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:34:49,584 INFO [StoreOpener-ce8b2c342e0c55d57c9696ce6e06a527-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:34:49,584 INFO [StoreOpener-ce8b2c342e0c55d57c9696ce6e06a527-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ce8b2c342e0c55d57c9696ce6e06a527 columnFamilyName B 2024-11-12T19:34:49,585 DEBUG [StoreOpener-ce8b2c342e0c55d57c9696ce6e06a527-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:49,585 INFO [StoreOpener-ce8b2c342e0c55d57c9696ce6e06a527-1 {}] regionserver.HStore(327): Store=ce8b2c342e0c55d57c9696ce6e06a527/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:34:49,585 INFO [StoreOpener-ce8b2c342e0c55d57c9696ce6e06a527-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:34:49,586 INFO [StoreOpener-ce8b2c342e0c55d57c9696ce6e06a527-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:34:49,587 INFO [StoreOpener-ce8b2c342e0c55d57c9696ce6e06a527-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ce8b2c342e0c55d57c9696ce6e06a527 columnFamilyName C 2024-11-12T19:34:49,587 DEBUG [StoreOpener-ce8b2c342e0c55d57c9696ce6e06a527-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:34:49,587 INFO [StoreOpener-ce8b2c342e0c55d57c9696ce6e06a527-1 {}] regionserver.HStore(327): Store=ce8b2c342e0c55d57c9696ce6e06a527/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:34:49,587 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:49,588 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:34:49,588 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:34:49,590 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-12T19:34:49,591 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1085): writing seq id for ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:34:49,592 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T19:34:49,593 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1102): Opened ce8b2c342e0c55d57c9696ce6e06a527; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75118586, jitterRate=0.11935415863990784}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T19:34:49,593 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1001): Region open journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:49,594 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., pid=117, masterSystemTime=1731440089575 2024-11-12T19:34:49,595 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:49,595 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
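Once the region is open, the master log a little further down shows "Client=jenkins//172.17.0.3 flush TestAcidGuarantees" being turned into a FlushTableProcedure (pid=118) with a FlushRegionProcedure subprocedure (pid=119). That request is presumably a plain Admin-level flush issued by the test; a minimal sketch under that assumption:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush the table; the master fans the work out to
          // the region servers. In the log below the region reports "NOT flushing
          // ... as already flushing" because a memstore-pressure flush is underway.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }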
2024-11-12T19:34:49,596 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=ce8b2c342e0c55d57c9696ce6e06a527, regionState=OPEN, openSeqNum=2, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:34:49,599 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-12T19:34:49,599 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; OpenRegionProcedure ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 in 174 msec 2024-11-12T19:34:49,601 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=116, resume processing ppid=115 2024-11-12T19:34:49,601 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ce8b2c342e0c55d57c9696ce6e06a527, ASSIGN in 330 msec 2024-11-12T19:34:49,602 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T19:34:49,602 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731440089602"}]},"ts":"1731440089602"} 2024-11-12T19:34:49,603 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-12T19:34:49,615 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T19:34:49,616 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2350 sec 2024-11-12T19:34:50,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-11-12T19:34:50,487 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 115 completed 2024-11-12T19:34:50,489 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x048087da to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@59daaa82 2024-11-12T19:34:50,531 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2aaa8c4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:50,539 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:50,542 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56696, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:50,551 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T19:34:50,552 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49348, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-12T19:34:50,554 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x345fa4f7 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@38dd8644 2024-11-12T19:34:50,601 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@466b85c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:50,602 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x315a23ef to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@65e17c26 2024-11-12T19:34:50,638 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f3ee89e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:50,639 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d125972 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53fc02ba 2024-11-12T19:34:50,664 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b0e6a43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:50,665 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x134bfe32 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2011d733 2024-11-12T19:34:50,687 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8e5fd00, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:50,688 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17b55f2f to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39b3baa5 2024-11-12T19:34:50,709 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e195d6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:50,710 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x402e5def to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14088aa9 2024-11-12T19:34:50,730 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23090be3, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:50,731 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10bda459 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@40302925 2024-11-12T19:34:50,748 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b8d64d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:50,749 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0657e1bf to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47ef9951 2024-11-12T19:34:50,774 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@784d683, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:50,775 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6dee2855 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@567011a8 2024-11-12T19:34:50,811 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7761f52b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:50,812 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x54e8a98a to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2430fee 2024-11-12T19:34:50,849 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a736a20, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:34:50,861 DEBUG [hconnection-0x7564581d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:50,861 DEBUG [hconnection-0x7c70760d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:50,862 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:34:50,862 DEBUG [hconnection-0x369c5f15-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:50,862 DEBUG [hconnection-0x75953404-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:50,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored 
pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-11-12T19:34:50,863 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56712, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:50,863 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56710, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:50,863 DEBUG [hconnection-0x62d64946-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:50,863 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56736, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:50,864 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56726, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:50,865 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56744, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:50,865 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:34:50,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-12T19:34:50,866 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:34:50,866 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:34:50,868 DEBUG [hconnection-0x7dfc6b37-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:50,868 DEBUG [hconnection-0x4f11267e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:50,869 DEBUG [hconnection-0x35c5addb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:50,869 DEBUG [hconnection-0x195327bc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:50,869 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56758, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:50,870 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56772, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:50,870 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56756, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-12T19:34:50,870 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56780, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:50,872 DEBUG [hconnection-0x6d7675eb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:34:50,874 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56796, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:34:50,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:34:50,877 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-12T19:34:50,879 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:34:50,879 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:50,879 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:34:50,879 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:50,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:34:50,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:50,909 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/2f9e7526f4bc4c108c25946a86e46fb5 is 50, key is test_row_0/A:col10/1731440090877/Put/seqid=0 2024-11-12T19:34:50,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:50,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440150917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:50,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:50,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440150920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:50,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:50,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440150923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:50,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:50,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440150924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:50,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:50,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440150927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:50,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742258_1434 (size=12001) 2024-11-12T19:34:50,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-12T19:34:51,018 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:51,019 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-12T19:34:51,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:51,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:51,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
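The RegionTooBusyException bursts above are the region server's write flow control: once the region's memstore grows past its blocking threshold while a flush is in progress, new Mutate calls are rejected until the flush drains it. In stock HBase that threshold is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the "Over memstore limit=512.0 K" here implies this test runs with a deliberately small flush size (the exact values are not shown in the log). A minimal sketch of a configuration that would reproduce a 512 KB blocking limit, assuming a hypothetical 128 KB flush size and the usual multiplier of 4:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Flush a memstore once it reaches 128 KB (assumed value for illustration;
    // the out-of-the-box default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

    // Block new writes once the memstore exceeds flush.size * multiplier,
    // i.e. 128 KB * 4 = 512 KB -- the "Over memstore limit=512.0 K" in the log.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long limit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking limit (bytes): " + limit);
  }
}

Clients that hit the limit simply see the exception and retry; the flush already requested on ce8b2c342e0c55d57c9696ce6e06a527 keeps running in the background.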
2024-11-12T19:34:51,019 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:51,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:51,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:51,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440151024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440151024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440151025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440151025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440151029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-12T19:34:51,175 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:51,175 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-12T19:34:51,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:51,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:51,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:51,176 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:51,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:51,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:51,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440151228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440151227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440151230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440151231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440151241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,337 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:51,337 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-12T19:34:51,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:51,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:51,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:51,338 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
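pid=119 above is a master-driven flush: the master repeatedly dispatches a FlushRegionCallable to 81d69e608036,33067, the region server refuses with "NOT flushing ... as already flushing" and reports the IOException back, and the master re-queues the remote procedure until the MemStoreFlusher run that is already underway completes. From a test or client, this kind of flush is requested through the Admin API; a minimal sketch (table name taken from the log, cluster connection details assumed to come from the classpath configuration):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    // Uses whatever hbase-site.xml is on the classpath (assumption).
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks for a flush of every region of the table; in this 2.7.0-SNAPSHOT build
      // the request shows up as master procedures (pid=118/119 in the log).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}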
2024-11-12T19:34:51,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:51,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:51,340 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/2f9e7526f4bc4c108c25946a86e46fb5 2024-11-12T19:34:51,371 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/4d3293e4c11a4c6bac5d622323d06356 is 50, key is test_row_0/B:col10/1731440090877/Put/seqid=0 2024-11-12T19:34:51,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742259_1435 (size=12001) 2024-11-12T19:34:51,411 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/4d3293e4c11a4c6bac5d622323d06356 2024-11-12T19:34:51,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/9bdebbe0d32c47cd90eae15a299796ff is 50, key is test_row_0/C:col10/1731440090877/Put/seqid=0 2024-11-12T19:34:51,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742260_1436 (size=12001) 2024-11-12T19:34:51,453 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/9bdebbe0d32c47cd90eae15a299796ff 2024-11-12T19:34:51,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/2f9e7526f4bc4c108c25946a86e46fb5 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/2f9e7526f4bc4c108c25946a86e46fb5 2024-11-12T19:34:51,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-12T19:34:51,484 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/2f9e7526f4bc4c108c25946a86e46fb5, entries=150, sequenceid=14, filesize=11.7 K 2024-11-12T19:34:51,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/4d3293e4c11a4c6bac5d622323d06356 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/4d3293e4c11a4c6bac5d622323d06356 2024-11-12T19:34:51,493 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:51,494 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/4d3293e4c11a4c6bac5d622323d06356, entries=150, sequenceid=14, filesize=11.7 K 2024-11-12T19:34:51,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/9bdebbe0d32c47cd90eae15a299796ff as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/9bdebbe0d32c47cd90eae15a299796ff 2024-11-12T19:34:51,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-12T19:34:51,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:51,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:51,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:51,499 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
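The flush itself is visible in the surrounding records: one temporary HFile per column family is written under the region's .tmp directory and then committed into the corresponding store directory (A/, B/, C/), each with entries=150, sequenceid=14, filesize=11.7 K. The resulting HDFS layout can be inspected directly; a small sketch using the Hadoop FileSystem API, with the NameNode address and region path copied from the log (the long hex string is the encoded region name):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListStoreFiles {
  public static void main(String[] args) throws Exception {
    // NameNode address and data root taken from the log lines above.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41367"), new Configuration());
    Path region = new Path("/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/"
        + "data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527");

    // One subdirectory per column family (A, B, C); each flushed HFile lands here
    // after being committed out of the region's .tmp directory.
    for (String family : new String[] {"A", "B", "C"}) {
      for (FileStatus f : fs.listStatus(new Path(region, family))) {
        System.out.println(f.getPath() + " " + f.getLen() + " bytes");
      }
    }
  }
}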
2024-11-12T19:34:51,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:51,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:51,510 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/9bdebbe0d32c47cd90eae15a299796ff, entries=150, sequenceid=14, filesize=11.7 K 2024-11-12T19:34:51,512 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for ce8b2c342e0c55d57c9696ce6e06a527 in 635ms, sequenceid=14, compaction requested=false 2024-11-12T19:34:51,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:51,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:34:51,534 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-12T19:34:51,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:34:51,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:51,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:34:51,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:51,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:34:51,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:51,552 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/0411a6051b5444e489e974ca0c378c35 is 50, key is test_row_0/A:col10/1731440090920/Put/seqid=0 2024-11-12T19:34:51,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440151550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440151552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440151554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440151556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440151559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742261_1437 (size=14341) 2024-11-12T19:34:51,591 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/0411a6051b5444e489e974ca0c378c35 2024-11-12T19:34:51,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/24e02d144aaa45deb23e2848aeb19368 is 50, key is test_row_0/B:col10/1731440090920/Put/seqid=0 2024-11-12T19:34:51,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742262_1438 (size=12001) 2024-11-12T19:34:51,658 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:51,660 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-12T19:34:51,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:34:51,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:51,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:51,661 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:51,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:51,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:51,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440151660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,672 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440151665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440151670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440151670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440151671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,814 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:51,815 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-12T19:34:51,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:51,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:51,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:51,816 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:51,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:51,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:51,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440151867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440151878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440151882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440151882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:51,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440151884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:51,970 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:51,971 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-12T19:34:51,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-12T19:34:51,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:51,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:51,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:51,975 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:51,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:51,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:52,048 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/24e02d144aaa45deb23e2848aeb19368 2024-11-12T19:34:52,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/7ad22d13bf1b4b1e895a1cac199acfd9 is 50, key is test_row_0/C:col10/1731440090920/Put/seqid=0 2024-11-12T19:34:52,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742263_1439 (size=12001) 2024-11-12T19:34:52,114 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/7ad22d13bf1b4b1e895a1cac199acfd9 2024-11-12T19:34:52,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/0411a6051b5444e489e974ca0c378c35 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/0411a6051b5444e489e974ca0c378c35 2024-11-12T19:34:52,131 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:52,131 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-12T19:34:52,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/0411a6051b5444e489e974ca0c378c35, entries=200, sequenceid=40, filesize=14.0 K 2024-11-12T19:34:52,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:52,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:52,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:52,131 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:52,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:52,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:52,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/24e02d144aaa45deb23e2848aeb19368 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/24e02d144aaa45deb23e2848aeb19368 2024-11-12T19:34:52,141 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/24e02d144aaa45deb23e2848aeb19368, entries=150, sequenceid=40, filesize=11.7 K 2024-11-12T19:34:52,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/7ad22d13bf1b4b1e895a1cac199acfd9 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/7ad22d13bf1b4b1e895a1cac199acfd9 2024-11-12T19:34:52,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/7ad22d13bf1b4b1e895a1cac199acfd9, entries=150, sequenceid=40, filesize=11.7 K 2024-11-12T19:34:52,151 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ce8b2c342e0c55d57c9696ce6e06a527 in 617ms, sequenceid=40, compaction requested=false 2024-11-12T19:34:52,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:52,180 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-12T19:34:52,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:34:52,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:52,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:34:52,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:52,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:34:52,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:52,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:34:52,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/8264026ce8a14545acde58e6bc2eca8e is 50, 
key is test_row_0/A:col10/1731440091552/Put/seqid=0 2024-11-12T19:34:52,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742264_1440 (size=12001) 2024-11-12T19:34:52,220 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/8264026ce8a14545acde58e6bc2eca8e 2024-11-12T19:34:52,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/9631362c835c42b6a2abe6a3279271ed is 50, key is test_row_0/B:col10/1731440091552/Put/seqid=0 2024-11-12T19:34:52,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742265_1441 (size=12001) 2024-11-12T19:34:52,285 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:52,286 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-12T19:34:52,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:52,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:52,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:52,286 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:52,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:52,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:52,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440152285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440152285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440152286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440152289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440152299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440152400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440152402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440152403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440152407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440152411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,441 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:52,442 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-12T19:34:52,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:52,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:52,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:52,445 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
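[Editor's note] The repeated RegionTooBusyException entries above are the region rejecting new Mutate calls while its memstore is over the blocking limit (512.0 K in this test run; in stock configurations that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier). Clients are expected to back off and retry once the in-flight flush drains the memstore. The sketch below is an illustrative client-side retry loop, not part of the test: the row, family and qualifier are placeholders chosen to resemble the log, and because the HBase client may surface the rejection wrapped in another IOException depending on its own retry settings, the check is deliberately loose.

// Illustrative sketch only (assumption: reachable cluster; placeholder row/family/qualifier).
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  // The rejection may arrive wrapped, so walk the cause chain instead of catching it directly.
  static boolean causedByBusyRegion(IOException e) {
    for (Throwable t = e; t != null; t = t.getCause()) {
      if (t instanceof org.apache.hadoop.hbase.RegionTooBusyException) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException e) {
          if (!causedByBusyRegion(e) || attempt == 5) {
            throw e; // not a busy-region rejection, or out of attempts
          }
          Thread.sleep(backoffMs); // give the flush time to drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}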
2024-11-12T19:34:52,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:52,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:52,597 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:52,598 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-12T19:34:52,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:52,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:52,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:52,598 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:52,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:52,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:52,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440152608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440152609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440152610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440152617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440152621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,657 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/9631362c835c42b6a2abe6a3279271ed 2024-11-12T19:34:52,688 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/b35f9a4a798c435b93b933b067ddafa4 is 50, key is test_row_0/C:col10/1731440091552/Put/seqid=0 2024-11-12T19:34:52,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742266_1442 (size=12001) 2024-11-12T19:34:52,750 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:52,752 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-12T19:34:52,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:52,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
as already flushing 2024-11-12T19:34:52,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:52,759 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:52,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:52,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:52,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440152920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440152920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440152923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,927 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:52,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-12T19:34:52,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:52,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:52,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:52,930 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:52,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:52,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:52,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440152928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:52,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440152930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:52,957 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-12T19:34:52,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-12T19:34:53,091 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:53,092 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-12T19:34:53,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:53,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:53,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:53,092 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:53,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:53,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:53,131 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/b35f9a4a798c435b93b933b067ddafa4 2024-11-12T19:34:53,160 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/8264026ce8a14545acde58e6bc2eca8e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/8264026ce8a14545acde58e6bc2eca8e 2024-11-12T19:34:53,183 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/8264026ce8a14545acde58e6bc2eca8e, entries=150, sequenceid=51, filesize=11.7 K 2024-11-12T19:34:53,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/9631362c835c42b6a2abe6a3279271ed as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/9631362c835c42b6a2abe6a3279271ed 2024-11-12T19:34:53,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/9631362c835c42b6a2abe6a3279271ed, entries=150, sequenceid=51, filesize=11.7 K 2024-11-12T19:34:53,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/b35f9a4a798c435b93b933b067ddafa4 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/b35f9a4a798c435b93b933b067ddafa4 2024-11-12T19:34:53,243 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/b35f9a4a798c435b93b933b067ddafa4, entries=150, sequenceid=51, filesize=11.7 K 2024-11-12T19:34:53,246 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for ce8b2c342e0c55d57c9696ce6e06a527 in 1066ms, sequenceid=51, compaction requested=true 2024-11-12T19:34:53,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:53,246 DEBUG 
[RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:53,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:34:53,246 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:53,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:53,246 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:53,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:34:53,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:53,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:34:53,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:53,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-12T19:34:53,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:34:53,250 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-12T19:34:53,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:34:53,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:53,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:34:53,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:53,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:34:53,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:53,256 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:53,256 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/A is initiating minor compaction (all files) 2024-11-12T19:34:53,256 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/A in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:34:53,256 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/2f9e7526f4bc4c108c25946a86e46fb5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/0411a6051b5444e489e974ca0c378c35, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/8264026ce8a14545acde58e6bc2eca8e] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=37.4 K 2024-11-12T19:34:53,256 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:53,256 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/B is initiating minor compaction (all files) 2024-11-12T19:34:53,256 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/B in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:53,256 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/4d3293e4c11a4c6bac5d622323d06356, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/24e02d144aaa45deb23e2848aeb19368, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/9631362c835c42b6a2abe6a3279271ed] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=35.2 K 2024-11-12T19:34:53,260 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d3293e4c11a4c6bac5d622323d06356, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1731440090873 2024-11-12T19:34:53,260 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f9e7526f4bc4c108c25946a86e46fb5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1731440090873 2024-11-12T19:34:53,264 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 24e02d144aaa45deb23e2848aeb19368, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1731440090915 2024-11-12T19:34:53,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/42adcf78ac5a4e189aaf156a8d13e418 is 50, key is test_row_0/A:col10/1731440092296/Put/seqid=0 2024-11-12T19:34:53,264 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0411a6051b5444e489e974ca0c378c35, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1731440090915 2024-11-12T19:34:53,270 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8264026ce8a14545acde58e6bc2eca8e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1731440091541 2024-11-12T19:34:53,275 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 9631362c835c42b6a2abe6a3279271ed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1731440091541 2024-11-12T19:34:53,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742267_1443 (size=12001) 2024-11-12T19:34:53,317 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#B#compaction#367 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:53,317 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/7f0f4993cdbd4d819b6e45765bf09474 is 50, key is test_row_0/B:col10/1731440091552/Put/seqid=0 2024-11-12T19:34:53,320 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#A#compaction#368 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:53,320 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/35516ff644cf4bc1b1ee4fb45a9b5e0d is 50, key is test_row_0/A:col10/1731440091552/Put/seqid=0 2024-11-12T19:34:53,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742268_1444 (size=12104) 2024-11-12T19:34:53,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742269_1445 (size=12104) 2024-11-12T19:34:53,382 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/35516ff644cf4bc1b1ee4fb45a9b5e0d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/35516ff644cf4bc1b1ee4fb45a9b5e0d 2024-11-12T19:34:53,430 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/A of ce8b2c342e0c55d57c9696ce6e06a527 into 35516ff644cf4bc1b1ee4fb45a9b5e0d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:34:53,430 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:53,430 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/A, priority=13, startTime=1731440093246; duration=0sec 2024-11-12T19:34:53,431 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:53,431 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:A 2024-11-12T19:34:53,431 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:53,443 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:53,443 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/C is initiating minor compaction (all files) 2024-11-12T19:34:53,443 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/C in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:34:53,443 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/9bdebbe0d32c47cd90eae15a299796ff, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/7ad22d13bf1b4b1e895a1cac199acfd9, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/b35f9a4a798c435b93b933b067ddafa4] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=35.2 K 2024-11-12T19:34:53,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:53,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:34:53,453 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9bdebbe0d32c47cd90eae15a299796ff, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1731440090873 2024-11-12T19:34:53,453 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ad22d13bf1b4b1e895a1cac199acfd9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1731440090915 2024-11-12T19:34:53,455 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting b35f9a4a798c435b93b933b067ddafa4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1731440091541 2024-11-12T19:34:53,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:53,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440153461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:53,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:53,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440153462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:53,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440153465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:53,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440153465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:53,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:53,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440153467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:53,493 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#C#compaction#369 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:53,494 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/0b263d6c84a741e98c07b91b252dde25 is 50, key is test_row_0/C:col10/1731440091552/Put/seqid=0 2024-11-12T19:34:53,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742270_1446 (size=12104) 2024-11-12T19:34:53,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:53,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440153580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:53,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:53,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440153584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:53,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:53,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440153585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:53,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:53,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440153585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:53,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:53,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440153588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:53,710 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/42adcf78ac5a4e189aaf156a8d13e418 2024-11-12T19:34:53,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/eaa3e25d260d40e69459348a3b1c9fd5 is 50, key is test_row_0/B:col10/1731440092296/Put/seqid=0 2024-11-12T19:34:53,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742271_1447 (size=12001) 2024-11-12T19:34:53,767 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/7f0f4993cdbd4d819b6e45765bf09474 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/7f0f4993cdbd4d819b6e45765bf09474 2024-11-12T19:34:53,780 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/B of ce8b2c342e0c55d57c9696ce6e06a527 into 7f0f4993cdbd4d819b6e45765bf09474(size=11.8 K), total size for store is 11.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:34:53,780 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:53,780 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/B, priority=13, startTime=1731440093246; duration=0sec 2024-11-12T19:34:53,780 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:53,781 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:B 2024-11-12T19:34:53,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:53,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440153790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:53,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:53,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440153795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:53,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:53,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440153795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:53,802 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:53,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440153798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:53,802 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:53,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440153798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:53,955 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/0b263d6c84a741e98c07b91b252dde25 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/0b263d6c84a741e98c07b91b252dde25 2024-11-12T19:34:53,983 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/C of ce8b2c342e0c55d57c9696ce6e06a527 into 0b263d6c84a741e98c07b91b252dde25(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:34:53,983 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:53,983 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/C, priority=13, startTime=1731440093247; duration=0sec 2024-11-12T19:34:53,983 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:53,983 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:C 2024-11-12T19:34:54,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:54,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440154101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:54,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:54,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440154103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:54,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:54,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440154103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:54,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:54,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440154106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:54,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:54,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440154106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:54,151 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/eaa3e25d260d40e69459348a3b1c9fd5 2024-11-12T19:34:54,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/e37d8ece8ae240dd9b030d87897b6103 is 50, key is test_row_0/C:col10/1731440092296/Put/seqid=0 2024-11-12T19:34:54,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742272_1448 (size=12001) 2024-11-12T19:34:54,204 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/e37d8ece8ae240dd9b030d87897b6103 2024-11-12T19:34:54,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/42adcf78ac5a4e189aaf156a8d13e418 as 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/42adcf78ac5a4e189aaf156a8d13e418 2024-11-12T19:34:54,222 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/42adcf78ac5a4e189aaf156a8d13e418, entries=150, sequenceid=76, filesize=11.7 K 2024-11-12T19:34:54,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/eaa3e25d260d40e69459348a3b1c9fd5 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/eaa3e25d260d40e69459348a3b1c9fd5 2024-11-12T19:34:54,228 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/eaa3e25d260d40e69459348a3b1c9fd5, entries=150, sequenceid=76, filesize=11.7 K 2024-11-12T19:34:54,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/e37d8ece8ae240dd9b030d87897b6103 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/e37d8ece8ae240dd9b030d87897b6103 2024-11-12T19:34:54,233 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/e37d8ece8ae240dd9b030d87897b6103, entries=150, sequenceid=76, filesize=11.7 K 2024-11-12T19:34:54,238 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ce8b2c342e0c55d57c9696ce6e06a527 in 987ms, sequenceid=76, compaction requested=false 2024-11-12T19:34:54,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:54,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:34:54,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-11-12T19:34:54,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-11-12T19:34:54,260 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-12T19:34:54,260 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3810 sec 2024-11-12T19:34:54,265 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 3.3980 sec 2024-11-12T19:34:54,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:34:54,619 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-12T19:34:54,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:34:54,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:54,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:34:54,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:54,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:34:54,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:54,651 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/bab319cbcb0d433090f8100a337c87da is 50, key is test_row_0/A:col10/1731440094617/Put/seqid=0 2024-11-12T19:34:54,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742273_1449 (size=9657) 2024-11-12T19:34:54,684 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/bab319cbcb0d433090f8100a337c87da 2024-11-12T19:34:54,691 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/c22391afe1b545a9bff9d49c1e16a481 is 50, key is test_row_0/B:col10/1731440094617/Put/seqid=0 2024-11-12T19:34:54,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:54,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440154698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:54,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:54,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440154698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:54,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742274_1450 (size=9657) 2024-11-12T19:34:54,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:54,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440154709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:54,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:54,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440154709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:54,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:54,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440154710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:54,730 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/c22391afe1b545a9bff9d49c1e16a481 2024-11-12T19:34:54,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/bec9d75d31394c3d8736cb513b1861e5 is 50, key is test_row_0/C:col10/1731440094617/Put/seqid=0 2024-11-12T19:34:54,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:54,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440154811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:54,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:54,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440154811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:54,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742275_1451 (size=9657) 2024-11-12T19:34:54,829 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/bec9d75d31394c3d8736cb513b1861e5 2024-11-12T19:34:54,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:54,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440154831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:54,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:54,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440154832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:54,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:54,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440154834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:54,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/bab319cbcb0d433090f8100a337c87da as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/bab319cbcb0d433090f8100a337c87da 2024-11-12T19:34:54,903 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/bab319cbcb0d433090f8100a337c87da, entries=100, sequenceid=93, filesize=9.4 K 2024-11-12T19:34:54,911 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/c22391afe1b545a9bff9d49c1e16a481 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/c22391afe1b545a9bff9d49c1e16a481 2024-11-12T19:34:54,926 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/c22391afe1b545a9bff9d49c1e16a481, entries=100, sequenceid=93, filesize=9.4 K 2024-11-12T19:34:54,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/bec9d75d31394c3d8736cb513b1861e5 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/bec9d75d31394c3d8736cb513b1861e5 2024-11-12T19:34:54,940 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/bec9d75d31394c3d8736cb513b1861e5, entries=100, sequenceid=93, filesize=9.4 K 2024-11-12T19:34:54,941 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=140.89 KB/144270 for ce8b2c342e0c55d57c9696ce6e06a527 in 323ms, sequenceid=93, compaction requested=true 2024-11-12T19:34:54,942 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:54,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:34:54,942 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:54,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:54,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:34:54,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:54,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:34:54,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:54,942 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:54,947 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:54,947 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/B is initiating minor compaction (all files) 2024-11-12T19:34:54,947 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/B in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:34:54,947 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/7f0f4993cdbd4d819b6e45765bf09474, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/eaa3e25d260d40e69459348a3b1c9fd5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/c22391afe1b545a9bff9d49c1e16a481] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=33.0 K 2024-11-12T19:34:54,948 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f0f4993cdbd4d819b6e45765bf09474, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1731440091541 2024-11-12T19:34:54,949 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting eaa3e25d260d40e69459348a3b1c9fd5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1731440092233 2024-11-12T19:34:54,949 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:54,949 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/A is initiating minor compaction (all files) 2024-11-12T19:34:54,949 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/A in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:34:54,949 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/35516ff644cf4bc1b1ee4fb45a9b5e0d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/42adcf78ac5a4e189aaf156a8d13e418, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/bab319cbcb0d433090f8100a337c87da] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=33.0 K 2024-11-12T19:34:54,949 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting c22391afe1b545a9bff9d49c1e16a481, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1731440093461 2024-11-12T19:34:54,952 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35516ff644cf4bc1b1ee4fb45a9b5e0d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1731440091541 2024-11-12T19:34:54,958 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42adcf78ac5a4e189aaf156a8d13e418, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1731440092233 2024-11-12T19:34:54,962 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting bab319cbcb0d433090f8100a337c87da, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1731440093461 2024-11-12T19:34:54,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-12T19:34:54,978 INFO [Thread-1958 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-12T19:34:54,982 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:34:54,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-11-12T19:34:54,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-12T19:34:54,983 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:34:54,984 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:34:54,984 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:34:55,001 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#B#compaction#375 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:55,002 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/d5e5fa61f25540f180eaf9ef6cde44ad is 50, key is test_row_0/B:col10/1731440094617/Put/seqid=0 2024-11-12T19:34:55,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:34:55,021 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-12T19:34:55,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:34:55,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:55,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:34:55,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:55,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:34:55,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:55,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742276_1452 (size=12207) 2024-11-12T19:34:55,042 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#A#compaction#376 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:55,043 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/e85962da8ec146d7a8f0f60b94014062 is 50, key is test_row_0/A:col10/1731440094617/Put/seqid=0 2024-11-12T19:34:55,047 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/2a6358f20e0b40ab8d7a209c98425cd7 is 50, key is test_row_0/A:col10/1731440094693/Put/seqid=0 2024-11-12T19:34:55,057 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/d5e5fa61f25540f180eaf9ef6cde44ad as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/d5e5fa61f25540f180eaf9ef6cde44ad 2024-11-12T19:34:55,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:55,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440155046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:55,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:55,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440155048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:55,067 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/B of ce8b2c342e0c55d57c9696ce6e06a527 into d5e5fa61f25540f180eaf9ef6cde44ad(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:34:55,067 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:55,067 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/B, priority=13, startTime=1731440094942; duration=0sec 2024-11-12T19:34:55,067 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:55,067 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:B 2024-11-12T19:34:55,068 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:34:55,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:55,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440155055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:55,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:55,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440155057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:55,072 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:34:55,072 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/C is initiating minor compaction (all files) 2024-11-12T19:34:55,072 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/C in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:55,072 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/0b263d6c84a741e98c07b91b252dde25, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/e37d8ece8ae240dd9b030d87897b6103, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/bec9d75d31394c3d8736cb513b1861e5] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=33.0 K 2024-11-12T19:34:55,073 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b263d6c84a741e98c07b91b252dde25, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1731440091541 2024-11-12T19:34:55,073 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting e37d8ece8ae240dd9b030d87897b6103, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1731440092233 2024-11-12T19:34:55,073 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting bec9d75d31394c3d8736cb513b1861e5, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1731440093461 2024-11-12T19:34:55,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:55,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440155060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:55,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742278_1454 (size=16681) 2024-11-12T19:34:55,085 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#C#compaction#378 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:55,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-12T19:34:55,085 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/8fcee0be92d744b7b49af3aa3caea190 is 50, key is test_row_0/C:col10/1731440094617/Put/seqid=0 2024-11-12T19:34:55,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742277_1453 (size=12207) 2024-11-12T19:34:55,130 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/e85962da8ec146d7a8f0f60b94014062 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/e85962da8ec146d7a8f0f60b94014062 2024-11-12T19:34:55,137 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/A of ce8b2c342e0c55d57c9696ce6e06a527 into e85962da8ec146d7a8f0f60b94014062(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:34:55,137 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:55,137 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/A, priority=13, startTime=1731440094942; duration=0sec 2024-11-12T19:34:55,137 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:55,137 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:A 2024-11-12T19:34:55,138 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:55,139 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-12T19:34:55,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:55,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
as already flushing 2024-11-12T19:34:55,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:55,140 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:55,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:55,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:55,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742279_1455 (size=12207) 2024-11-12T19:34:55,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:55,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440155163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:55,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:55,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440155174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:55,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:55,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440155172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:55,186 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/8fcee0be92d744b7b49af3aa3caea190 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/8fcee0be92d744b7b49af3aa3caea190 2024-11-12T19:34:55,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:55,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440155184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:55,223 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/C of ce8b2c342e0c55d57c9696ce6e06a527 into 8fcee0be92d744b7b49af3aa3caea190(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:34:55,223 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:55,223 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/C, priority=13, startTime=1731440094942; duration=0sec 2024-11-12T19:34:55,223 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:55,223 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:C 2024-11-12T19:34:55,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-12T19:34:55,299 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:55,301 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-12T19:34:55,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:55,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:55,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:34:55,301 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:55,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:55,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:55,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:55,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440155371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:55,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:55,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440155375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:55,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:55,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440155382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:55,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:55,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440155382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:55,394 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:55,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440155391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:55,456 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:55,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-12T19:34:55,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:55,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:55,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:55,459 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:55,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:55,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:55,481 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/2a6358f20e0b40ab8d7a209c98425cd7 2024-11-12T19:34:55,508 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/239345cc2d53416ba6dfa5cff5e3f4f6 is 50, key is test_row_0/B:col10/1731440094693/Put/seqid=0 2024-11-12T19:34:55,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742280_1456 (size=12001) 2024-11-12T19:34:55,551 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/239345cc2d53416ba6dfa5cff5e3f4f6 2024-11-12T19:34:55,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-12T19:34:55,601 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/bf3c340cce3341dbb66d59f224106370 is 50, key is test_row_0/C:col10/1731440094693/Put/seqid=0 2024-11-12T19:34:55,619 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:55,620 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-12T19:34:55,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:55,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:55,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:55,620 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:55,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:55,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:55,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742281_1457 (size=12001) 2024-11-12T19:34:55,648 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/bf3c340cce3341dbb66d59f224106370 2024-11-12T19:34:55,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/2a6358f20e0b40ab8d7a209c98425cd7 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/2a6358f20e0b40ab8d7a209c98425cd7 2024-11-12T19:34:55,678 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:55,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440155675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:55,691 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/2a6358f20e0b40ab8d7a209c98425cd7, entries=250, sequenceid=119, filesize=16.3 K 2024-11-12T19:34:55,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:55,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440155688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:55,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:55,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440155691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:55,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/239345cc2d53416ba6dfa5cff5e3f4f6 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/239345cc2d53416ba6dfa5cff5e3f4f6 2024-11-12T19:34:55,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:55,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440155697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:55,706 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/239345cc2d53416ba6dfa5cff5e3f4f6, entries=150, sequenceid=119, filesize=11.7 K 2024-11-12T19:34:55,707 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/bf3c340cce3341dbb66d59f224106370 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/bf3c340cce3341dbb66d59f224106370 2024-11-12T19:34:55,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/bf3c340cce3341dbb66d59f224106370, entries=150, sequenceid=119, filesize=11.7 K 2024-11-12T19:34:55,717 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ce8b2c342e0c55d57c9696ce6e06a527 in 696ms, sequenceid=119, compaction requested=false 2024-11-12T19:34:55,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:55,778 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:55,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-12T19:34:55,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:34:55,782 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-12T19:34:55,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:34:55,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:55,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:34:55,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:55,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:34:55,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:55,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/3d0c7dd56c704b90a5bbe6d98d4292b1 is 50, key is test_row_0/A:col10/1731440095057/Put/seqid=0 2024-11-12T19:34:55,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742282_1458 (size=12001) 2024-11-12T19:34:55,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:55,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:34:56,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:56,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440156062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:56,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-12T19:34:56,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:56,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440156171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:56,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:56,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440156179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:56,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:56,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440156199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:56,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:56,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440156201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:56,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:56,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440156207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:56,238 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/3d0c7dd56c704b90a5bbe6d98d4292b1 2024-11-12T19:34:56,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/57e7f355f518488a821334341c2ea4db is 50, key is test_row_0/B:col10/1731440095057/Put/seqid=0 2024-11-12T19:34:56,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742283_1459 (size=12001) 2024-11-12T19:34:56,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:56,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440156379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:56,690 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/57e7f355f518488a821334341c2ea4db 2024-11-12T19:34:56,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:56,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440156687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:56,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/e8991ba68bf04e2b89227cb6ed24cb41 is 50, key is test_row_0/C:col10/1731440095057/Put/seqid=0 2024-11-12T19:34:56,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742284_1460 (size=12001) 2024-11-12T19:34:56,775 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/e8991ba68bf04e2b89227cb6ed24cb41 2024-11-12T19:34:56,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/3d0c7dd56c704b90a5bbe6d98d4292b1 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/3d0c7dd56c704b90a5bbe6d98d4292b1 2024-11-12T19:34:56,784 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/3d0c7dd56c704b90a5bbe6d98d4292b1, entries=150, sequenceid=132, filesize=11.7 K 2024-11-12T19:34:56,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/57e7f355f518488a821334341c2ea4db as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/57e7f355f518488a821334341c2ea4db 2024-11-12T19:34:56,789 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 
{event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/57e7f355f518488a821334341c2ea4db, entries=150, sequenceid=132, filesize=11.7 K 2024-11-12T19:34:56,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/e8991ba68bf04e2b89227cb6ed24cb41 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/e8991ba68bf04e2b89227cb6ed24cb41 2024-11-12T19:34:56,795 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/e8991ba68bf04e2b89227cb6ed24cb41, entries=150, sequenceid=132, filesize=11.7 K 2024-11-12T19:34:56,796 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for ce8b2c342e0c55d57c9696ce6e06a527 in 1014ms, sequenceid=132, compaction requested=true 2024-11-12T19:34:56,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:56,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:34:56,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-11-12T19:34:56,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-11-12T19:34:56,799 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-12T19:34:56,799 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8140 sec 2024-11-12T19:34:56,801 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 1.8180 sec 2024-11-12T19:34:57,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-12T19:34:57,097 INFO [Thread-1958 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-12T19:34:57,099 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:34:57,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-11-12T19:34:57,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-12T19:34:57,103 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:34:57,106 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:34:57,106 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:34:57,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:34:57,194 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-12T19:34:57,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:34:57,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:57,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:34:57,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-12T19:34:57,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:34:57,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:57,198 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/e6a6683ea3cf44d2ba81dbe81f1fc49c is 50, key is test_row_0/A:col10/1731440097193/Put/seqid=0 2024-11-12T19:34:57,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-12T19:34:57,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742285_1461 (size=14541) 2024-11-12T19:34:57,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:57,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440157211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:57,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:57,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:57,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440157215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:57,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440157216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:57,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:57,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440157216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:57,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:57,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440157220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:57,263 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:57,263 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-12T19:34:57,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:57,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:57,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:57,267 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:57,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:57,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:57,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:57,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440157322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:57,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:57,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440157323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:57,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-12T19:34:57,420 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:57,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-12T19:34:57,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:57,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:57,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:57,421 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:57,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:57,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:57,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:57,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440157527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:57,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:57,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440157527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:57,578 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:57,579 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-12T19:34:57,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:57,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:57,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:57,579 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:57,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:57,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:57,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/e6a6683ea3cf44d2ba81dbe81f1fc49c 2024-11-12T19:34:57,610 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/5e61332ff38b442ca627c86935f47655 is 50, key is test_row_0/B:col10/1731440097193/Put/seqid=0 2024-11-12T19:34:57,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742286_1462 (size=12151) 2024-11-12T19:34:57,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-12T19:34:57,734 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:57,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-12T19:34:57,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:57,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:57,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:57,739 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:57,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:57,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:57,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:57,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440157835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:57,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:57,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440157841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:57,896 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:57,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-12T19:34:57,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:57,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:57,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:57,897 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:57,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:57,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:58,024 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/5e61332ff38b442ca627c86935f47655 2024-11-12T19:34:58,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/88b18ad7551a4796b51c5de120a7d075 is 50, key is test_row_0/C:col10/1731440097193/Put/seqid=0 2024-11-12T19:34:58,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742287_1463 (size=12151) 2024-11-12T19:34:58,050 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:58,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-12T19:34:58,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:58,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:58,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:58,051 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:58,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:58,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:58,205 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:58,205 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-12T19:34:58,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:58,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:58,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:58,206 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:58,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:58,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:58,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-12T19:34:58,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:58,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440158343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:58,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:58,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440158347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:58,358 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:58,358 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-12T19:34:58,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:58,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:58,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:58,359 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:58,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:58,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:58,441 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/88b18ad7551a4796b51c5de120a7d075 2024-11-12T19:34:58,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/e6a6683ea3cf44d2ba81dbe81f1fc49c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/e6a6683ea3cf44d2ba81dbe81f1fc49c 2024-11-12T19:34:58,447 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/e6a6683ea3cf44d2ba81dbe81f1fc49c, entries=200, sequenceid=159, filesize=14.2 K 2024-11-12T19:34:58,448 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/5e61332ff38b442ca627c86935f47655 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/5e61332ff38b442ca627c86935f47655 2024-11-12T19:34:58,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/5e61332ff38b442ca627c86935f47655, entries=150, sequenceid=159, filesize=11.9 K 2024-11-12T19:34:58,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/88b18ad7551a4796b51c5de120a7d075 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/88b18ad7551a4796b51c5de120a7d075 2024-11-12T19:34:58,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/88b18ad7551a4796b51c5de120a7d075, entries=150, sequenceid=159, filesize=11.9 K 2024-11-12T19:34:58,455 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for ce8b2c342e0c55d57c9696ce6e06a527 in 1261ms, sequenceid=159, compaction requested=true 2024-11-12T19:34:58,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:58,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
ce8b2c342e0c55d57c9696ce6e06a527:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:34:58,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:58,455 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:34:58,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:34:58,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:58,455 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:34:58,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:34:58,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:58,456 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:34:58,456 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55430 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:34:58,456 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/A is initiating minor compaction (all files) 2024-11-12T19:34:58,456 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/B is initiating minor compaction (all files) 2024-11-12T19:34:58,456 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/A in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:58,456 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/B in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
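The compaction selection above is the exploring policy picking all four eligible store files for each of stores A and B once the flush pushed the file count to the minimum threshold; the "16 blocking" figure corresponds to the blocking store-file limit. A hedged sketch of the settings that drive this selection, with illustrative values rather than the ones this cluster ran with:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Minimum number of eligible files before a minor compaction is selected
            // (four files were eligible above, so selection fired right after the flush).
            conf.setInt("hbase.hstore.compaction.min", 4);
            // Upper bound on files included in a single minor compaction.
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Writes to a store are delayed once it accumulates this many files
            // (the "16 blocking" in the SortedCompactionPolicy lines above).
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        }
    }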
2024-11-12T19:34:58,456 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/d5e5fa61f25540f180eaf9ef6cde44ad, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/239345cc2d53416ba6dfa5cff5e3f4f6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/57e7f355f518488a821334341c2ea4db, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/5e61332ff38b442ca627c86935f47655] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=47.2 K 2024-11-12T19:34:58,456 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/e85962da8ec146d7a8f0f60b94014062, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/2a6358f20e0b40ab8d7a209c98425cd7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/3d0c7dd56c704b90a5bbe6d98d4292b1, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/e6a6683ea3cf44d2ba81dbe81f1fc49c] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=54.1 K 2024-11-12T19:34:58,456 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting e85962da8ec146d7a8f0f60b94014062, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1731440092281 2024-11-12T19:34:58,456 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting d5e5fa61f25540f180eaf9ef6cde44ad, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1731440092281 2024-11-12T19:34:58,457 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 239345cc2d53416ba6dfa5cff5e3f4f6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1731440094693 2024-11-12T19:34:58,457 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a6358f20e0b40ab8d7a209c98425cd7, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1731440094693 2024-11-12T19:34:58,457 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 57e7f355f518488a821334341c2ea4db, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1731440095045 2024-11-12T19:34:58,457 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
3d0c7dd56c704b90a5bbe6d98d4292b1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1731440095045 2024-11-12T19:34:58,457 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e61332ff38b442ca627c86935f47655, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1731440096044 2024-11-12T19:34:58,457 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6a6683ea3cf44d2ba81dbe81f1fc49c, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1731440096044 2024-11-12T19:34:58,465 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#B#compaction#387 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:58,466 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/48472fc7b5ce48c9aaa7878e999b4804 is 50, key is test_row_0/B:col10/1731440097193/Put/seqid=0 2024-11-12T19:34:58,468 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#A#compaction#388 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:58,468 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/6d8cfa29e40a4479960ff8d18df22c07 is 50, key is test_row_0/A:col10/1731440097193/Put/seqid=0 2024-11-12T19:34:58,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742288_1464 (size=12493) 2024-11-12T19:34:58,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742289_1465 (size=12493) 2024-11-12T19:34:58,510 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:58,511 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-12T19:34:58,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
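The "average throughput is 3.28 MB/second ... total limit is 50.00 MB/second" lines are the pressure-aware throughput controller reporting its current cap: compaction I/O is throttled between a lower and a higher bound depending on store-file pressure. A sketch of the relevant settings; the property names are given as recalled for 2.x and should be treated as assumptions to verify, and the byte values are only illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Cap applied when there is little store-file pressure.
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            // Cap the controller ramps up to as pressure increases.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        }
    }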
2024-11-12T19:34:58,511 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-12T19:34:58,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:34:58,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:58,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:34:58,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:58,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:34:58,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:58,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/841fbbd287a8413ea5f7d5076a4a3fc7 is 50, key is test_row_0/A:col10/1731440097214/Put/seqid=0 2024-11-12T19:34:58,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742290_1466 (size=12151) 2024-11-12T19:34:58,518 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/841fbbd287a8413ea5f7d5076a4a3fc7 2024-11-12T19:34:58,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/54150eaf6e674cd0be8d651a87c172a2 is 50, key is test_row_0/B:col10/1731440097214/Put/seqid=0 2024-11-12T19:34:58,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742291_1467 (size=12151) 2024-11-12T19:34:58,876 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/48472fc7b5ce48c9aaa7878e999b4804 as 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/48472fc7b5ce48c9aaa7878e999b4804 2024-11-12T19:34:58,880 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/6d8cfa29e40a4479960ff8d18df22c07 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/6d8cfa29e40a4479960ff8d18df22c07 2024-11-12T19:34:58,881 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/B of ce8b2c342e0c55d57c9696ce6e06a527 into 48472fc7b5ce48c9aaa7878e999b4804(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:34:58,881 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:58,881 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/B, priority=12, startTime=1731440098455; duration=0sec 2024-11-12T19:34:58,881 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:34:58,881 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:B 2024-11-12T19:34:58,881 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:34:58,882 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:34:58,882 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/C is initiating minor compaction (all files) 2024-11-12T19:34:58,882 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/C in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
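The CompactingMemStore "FLUSHING TO DISK" and CompactionPipeline "Swapping pipeline suffix" lines earlier in this stretch indicate the table's families use the 2.x in-memory-compaction memstore. A hedged sketch of enabling that per column family at table-creation time; the table and family names are taken from this log, while the BASIC policy and everything else are assumptions:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection();
                 Admin admin = conn.getAdmin()) {
                TableDescriptor desc = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes("A"))
                        // BASIC keeps flattened segments in an in-memory pipeline before
                        // flushing, which is what the CompactionPipeline lines reflect.
                        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                        .build())
                    .build();
                admin.createTable(desc);
            }
        }
    }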
2024-11-12T19:34:58,882 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/8fcee0be92d744b7b49af3aa3caea190, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/bf3c340cce3341dbb66d59f224106370, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/e8991ba68bf04e2b89227cb6ed24cb41, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/88b18ad7551a4796b51c5de120a7d075] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=47.2 K 2024-11-12T19:34:58,882 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 8fcee0be92d744b7b49af3aa3caea190, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1731440092281 2024-11-12T19:34:58,883 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting bf3c340cce3341dbb66d59f224106370, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1731440094693 2024-11-12T19:34:58,883 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting e8991ba68bf04e2b89227cb6ed24cb41, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1731440095045 2024-11-12T19:34:58,883 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 88b18ad7551a4796b51c5de120a7d075, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1731440096044 2024-11-12T19:34:58,884 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/A of ce8b2c342e0c55d57c9696ce6e06a527 into 6d8cfa29e40a4479960ff8d18df22c07(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
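Completed-compaction entries like the one just above can also be observed from a client by requesting a compaction and polling its state through the Admin API. A minimal sketch; the use of majorCompact and the one-second polling interval are assumptions:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WaitForCompactionSketch {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection();
                 Admin admin = conn.getAdmin()) {
                admin.majorCompact(table);                        // queue a compaction request
                while (admin.getCompactionState(table) != CompactionState.NONE) {
                    Thread.sleep(1000);                           // poll until the queues drain
                }
            }
        }
    }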
2024-11-12T19:34:58,884 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:58,884 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/A, priority=12, startTime=1731440098455; duration=0sec 2024-11-12T19:34:58,884 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:58,884 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:A 2024-11-12T19:34:58,891 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#C#compaction#391 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:34:58,891 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/2d4cc320e3cb4e0a87ebc40300da2f89 is 50, key is test_row_0/C:col10/1731440097193/Put/seqid=0 2024-11-12T19:34:58,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742292_1468 (size=12493) 2024-11-12T19:34:58,928 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/54150eaf6e674cd0be8d651a87c172a2 2024-11-12T19:34:58,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/a1a58596c20143ceaead4e6e8f55f305 is 50, key is test_row_0/C:col10/1731440097214/Put/seqid=0 2024-11-12T19:34:58,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742293_1469 (size=12151) 2024-11-12T19:34:58,941 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/a1a58596c20143ceaead4e6e8f55f305 2024-11-12T19:34:58,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/841fbbd287a8413ea5f7d5076a4a3fc7 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/841fbbd287a8413ea5f7d5076a4a3fc7 2024-11-12T19:34:58,948 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/841fbbd287a8413ea5f7d5076a4a3fc7, entries=150, sequenceid=168, filesize=11.9 K 2024-11-12T19:34:58,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/54150eaf6e674cd0be8d651a87c172a2 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/54150eaf6e674cd0be8d651a87c172a2 2024-11-12T19:34:58,951 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/54150eaf6e674cd0be8d651a87c172a2, entries=150, sequenceid=168, filesize=11.9 K 2024-11-12T19:34:58,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/a1a58596c20143ceaead4e6e8f55f305 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/a1a58596c20143ceaead4e6e8f55f305 2024-11-12T19:34:58,954 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/a1a58596c20143ceaead4e6e8f55f305, entries=150, sequenceid=168, filesize=11.9 K 2024-11-12T19:34:58,955 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=0 B/0 for ce8b2c342e0c55d57c9696ce6e06a527 in 443ms, sequenceid=168, compaction requested=false 2024-11-12T19:34:58,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:58,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
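The flush that just committed files for A, B, and C (sequenceid=168) was driven by the FlushTableProcedure/FlushRegionProcedure pair whose completion appears in the next entries (pid=122/123) and which the client sees finish as "Operation: FLUSH, Table Name: default:TestAcidGuarantees". A minimal client-side sketch of issuing that request; only the table name is taken from the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection();
                 Admin admin = conn.getAdmin()) {
                // Admin.flush() asks the master to flush every region of the table; in this
                // log that request shows up as FlushTableProcedure pid=122 fanning out a
                // FlushRegionProcedure (pid=123) to the single region.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }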
2024-11-12T19:34:58,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-11-12T19:34:58,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-11-12T19:34:58,957 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-12T19:34:58,957 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8490 sec 2024-11-12T19:34:58,958 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 1.8580 sec 2024-11-12T19:34:59,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-12T19:34:59,223 INFO [Thread-1958 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-11-12T19:34:59,235 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:34:59,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-11-12T19:34:59,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-12T19:34:59,237 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:34:59,238 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:34:59,238 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:34:59,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:34:59,242 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-12T19:34:59,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:34:59,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:59,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:34:59,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-12T19:34:59,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:34:59,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:34:59,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/b7b199b2ff274764b383575d87804ecf is 50, key is test_row_0/A:col10/1731440099242/Put/seqid=0 2024-11-12T19:34:59,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742294_1470 (size=19321) 2024-11-12T19:34:59,252 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/b7b199b2ff274764b383575d87804ecf 2024-11-12T19:34:59,257 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/e8d086f35bab482ea41fdba3ac459398 is 50, key is test_row_0/B:col10/1731440099242/Put/seqid=0 2024-11-12T19:34:59,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742295_1471 (size=12151) 2024-11-12T19:34:59,303 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/2d4cc320e3cb4e0a87ebc40300da2f89 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/2d4cc320e3cb4e0a87ebc40300da2f89 2024-11-12T19:34:59,308 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/C of ce8b2c342e0c55d57c9696ce6e06a527 into 2d4cc320e3cb4e0a87ebc40300da2f89(size=12.2 K), total size for store is 24.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
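From here on the log repeats the same pattern: the writers issue puts faster than the flusher drains the memstore, so checkResources() keeps answering with RegionTooBusyException until the in-flight flush finishes. The stock client retries this exception internally; a caller driving its own writes could add an explicit backoff loop instead. A hedged sketch, with the table, family, row, and qualifier names borrowed from the log and the retry policy purely an assumption (under default client settings the exception may surface wrapped in a retries-exhausted error rather than directly):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoffSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection();
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
                long backoffMs = 50;
                for (int attempt = 0; attempt < 10; attempt++) {
                    try {
                        table.put(put);
                        break;                                    // write accepted
                    } catch (IOException busy) {
                        // Typically a RegionTooBusyException (possibly wrapped) while the
                        // memstore is over its blocking limit: back off and try again.
                        Thread.sleep(backoffMs);
                        backoffMs = Math.min(backoffMs * 2, 2000);
                    }
                }
            }
        }
    }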
2024-11-12T19:34:59,308 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:34:59,308 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/C, priority=12, startTime=1731440098455; duration=0sec 2024-11-12T19:34:59,308 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:34:59,309 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:C 2024-11-12T19:34:59,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:59,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440159314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:59,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:59,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440159315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:59,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:59,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440159316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:59,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-12T19:34:59,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:59,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440159360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:59,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:59,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440159363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:59,389 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:59,389 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-12T19:34:59,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:59,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:59,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:59,390 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:59,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:59,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:59,433 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:59,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440159425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:59,433 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:59,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440159427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:59,433 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:59,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440159431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:59,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-12T19:34:59,541 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:59,542 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-12T19:34:59,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:59,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:59,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:59,542 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:34:59,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:59,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:59,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:59,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440159634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:59,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:59,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440159634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:59,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:59,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440159635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:59,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/e8d086f35bab482ea41fdba3ac459398 2024-11-12T19:34:59,674 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/2e4241b550c14fd98593a5921948c206 is 50, key is test_row_0/C:col10/1731440099242/Put/seqid=0 2024-11-12T19:34:59,694 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:59,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-12T19:34:59,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:59,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:59,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:59,695 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:59,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:59,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:59,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742296_1472 (size=12151) 2024-11-12T19:34:59,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-12T19:34:59,846 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:34:59,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-12T19:34:59,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:34:59,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:34:59,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:34:59,847 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:59,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:59,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:34:59,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:59,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440159939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:59,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:59,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440159940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:34:59,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:34:59,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440159941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:00,000 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:00,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-12T19:35:00,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:00,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:35:00,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:00,000 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:00,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:00,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:00,107 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/2e4241b550c14fd98593a5921948c206 2024-11-12T19:35:00,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/b7b199b2ff274764b383575d87804ecf as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/b7b199b2ff274764b383575d87804ecf 2024-11-12T19:35:00,115 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/b7b199b2ff274764b383575d87804ecf, entries=300, sequenceid=181, filesize=18.9 K 2024-11-12T19:35:00,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/e8d086f35bab482ea41fdba3ac459398 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/e8d086f35bab482ea41fdba3ac459398 2024-11-12T19:35:00,118 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/e8d086f35bab482ea41fdba3ac459398, entries=150, sequenceid=181, filesize=11.9 K 2024-11-12T19:35:00,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/2e4241b550c14fd98593a5921948c206 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/2e4241b550c14fd98593a5921948c206 2024-11-12T19:35:00,122 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/2e4241b550c14fd98593a5921948c206, entries=150, sequenceid=181, filesize=11.9 K 2024-11-12T19:35:00,123 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for ce8b2c342e0c55d57c9696ce6e06a527 in 880ms, sequenceid=181, compaction requested=true 2024-11-12T19:35:00,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:00,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:35:00,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:00,123 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:00,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:35:00,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:00,123 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:00,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:35:00,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:00,126 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:00,126 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:00,126 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/B is initiating minor compaction (all files) 2024-11-12T19:35:00,126 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/A is initiating minor compaction (all files) 2024-11-12T19:35:00,126 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/B in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:00,126 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/A in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:35:00,126 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/48472fc7b5ce48c9aaa7878e999b4804, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/54150eaf6e674cd0be8d651a87c172a2, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/e8d086f35bab482ea41fdba3ac459398] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=35.9 K 2024-11-12T19:35:00,126 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/6d8cfa29e40a4479960ff8d18df22c07, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/841fbbd287a8413ea5f7d5076a4a3fc7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/b7b199b2ff274764b383575d87804ecf] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=42.9 K 2024-11-12T19:35:00,127 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 48472fc7b5ce48c9aaa7878e999b4804, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1731440096044 2024-11-12T19:35:00,127 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d8cfa29e40a4479960ff8d18df22c07, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1731440096044 2024-11-12T19:35:00,127 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 841fbbd287a8413ea5f7d5076a4a3fc7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1731440097208 2024-11-12T19:35:00,127 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 54150eaf6e674cd0be8d651a87c172a2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1731440097208 2024-11-12T19:35:00,127 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7b199b2ff274764b383575d87804ecf, keycount=300, bloomtype=ROW, size=18.9 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1731440099229 2024-11-12T19:35:00,127 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting e8d086f35bab482ea41fdba3ac459398, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1731440099235 2024-11-12T19:35:00,145 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#A#compaction#396 average throughput is 3.28 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:00,145 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/b2a1192be2794a83b7d6c072fac5f17b is 50, key is test_row_0/A:col10/1731440099242/Put/seqid=0 2024-11-12T19:35:00,146 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#B#compaction#397 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:00,147 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/d3b33fc1d3e0417dae7efe5d2b941a58 is 50, key is test_row_0/B:col10/1731440099242/Put/seqid=0 2024-11-12T19:35:00,152 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:00,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-12T19:35:00,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:35:00,152 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-12T19:35:00,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:35:00,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:00,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:35:00,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:00,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:35:00,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:00,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/657081ffb3914ee3964423df517f54ba is 50, key is test_row_0/A:col10/1731440099314/Put/seqid=0 2024-11-12T19:35:00,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742297_1473 (size=12595) 2024-11-12T19:35:00,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742298_1474 (size=12595) 2024-11-12T19:35:00,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742299_1475 (size=12151) 2024-11-12T19:35:00,195 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/657081ffb3914ee3964423df517f54ba 2024-11-12T19:35:00,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/7309e789ebe240728a963acceb68db51 is 50, key is test_row_0/B:col10/1731440099314/Put/seqid=0 2024-11-12T19:35:00,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742300_1476 (size=12151) 2024-11-12T19:35:00,340 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-12T19:35:00,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:35:00,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:35:00,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:00,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440160466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:00,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:00,484 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440160474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440160474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:00,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:00,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440160574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:00,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:00,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440160589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:00,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:00,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440160589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:00,603 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/b2a1192be2794a83b7d6c072fac5f17b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/b2a1192be2794a83b7d6c072fac5f17b 2024-11-12T19:35:00,611 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/d3b33fc1d3e0417dae7efe5d2b941a58 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/d3b33fc1d3e0417dae7efe5d2b941a58 2024-11-12T19:35:00,614 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/A of ce8b2c342e0c55d57c9696ce6e06a527 into b2a1192be2794a83b7d6c072fac5f17b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
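Note: the repeated RegionTooBusyException stack traces above come from HRegion.checkResources rejecting Mutate calls while the region's memstore is over its 512 K blocking limit; the HBase client (RpcRetryingCallerImpl, seen further down with "tries=6, retries=16") backs off and retries. The following is a minimal, hypothetical Java sketch of a writer tuning those client-side retries and handling the exception. The table, row and column names mirror this test's log; the retry and pause values are illustrative assumptions, not settings taken from this run.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyWriterSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Client-side retry knobs (assumed values; the log shows the client's
    // internal retry loop reporting "tries=6, retries=16").
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100); // ms between retries

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put); // the client retries internally before giving up
      } catch (RegionTooBusyException e) {
        // Thrown when HRegion.checkResources rejects the write because the
        // region memstore is over its blocking limit (512 K in this run).
        // Depending on the client path it may also arrive wrapped in a
        // RetriesExhaustedException once all retries are used up.
      }
    }
  }
}

In the test itself the writes are driven by AcidGuaranteesTestTool$AtomicityWriter (visible in the client-side traces below), so this snippet only illustrates the retry behaviour the log records; it is not the test's code.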
2024-11-12T19:35:00,614 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:00,614 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/A, priority=13, startTime=1731440100123; duration=0sec 2024-11-12T19:35:00,614 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:00,614 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:A 2024-11-12T19:35:00,614 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:00,616 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:00,616 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/C is initiating minor compaction (all files) 2024-11-12T19:35:00,616 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/C in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:00,617 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/2d4cc320e3cb4e0a87ebc40300da2f89, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/a1a58596c20143ceaead4e6e8f55f305, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/2e4241b550c14fd98593a5921948c206] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=35.9 K 2024-11-12T19:35:00,617 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d4cc320e3cb4e0a87ebc40300da2f89, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1731440096044 2024-11-12T19:35:00,618 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1a58596c20143ceaead4e6e8f55f305, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1731440097208 2024-11-12T19:35:00,618 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e4241b550c14fd98593a5921948c206, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1731440099235 2024-11-12T19:35:00,619 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/B of ce8b2c342e0c55d57c9696ce6e06a527 into d3b33fc1d3e0417dae7efe5d2b941a58(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:35:00,619 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:00,619 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/B, priority=13, startTime=1731440100123; duration=0sec 2024-11-12T19:35:00,619 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:00,619 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:B 2024-11-12T19:35:00,626 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/7309e789ebe240728a963acceb68db51 2024-11-12T19:35:00,629 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#C#compaction#400 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:00,630 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/3dd9db348ade4ba6ad584b8fced3067e is 50, key is test_row_0/C:col10/1731440099242/Put/seqid=0 2024-11-12T19:35:00,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/fef1b1ce473247f882bde422644223a6 is 50, key is test_row_0/C:col10/1731440099314/Put/seqid=0 2024-11-12T19:35:00,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742302_1478 (size=12151) 2024-11-12T19:35:00,642 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/fef1b1ce473247f882bde422644223a6 2024-11-12T19:35:00,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742301_1477 (size=12595) 2024-11-12T19:35:00,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/657081ffb3914ee3964423df517f54ba as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/657081ffb3914ee3964423df517f54ba 2024-11-12T19:35:00,660 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/657081ffb3914ee3964423df517f54ba, entries=150, sequenceid=207, filesize=11.9 K 2024-11-12T19:35:00,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/7309e789ebe240728a963acceb68db51 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/7309e789ebe240728a963acceb68db51 2024-11-12T19:35:00,668 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/7309e789ebe240728a963acceb68db51, entries=150, sequenceid=207, filesize=11.9 K 2024-11-12T19:35:00,670 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/fef1b1ce473247f882bde422644223a6 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/fef1b1ce473247f882bde422644223a6 2024-11-12T19:35:00,675 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/3dd9db348ade4ba6ad584b8fced3067e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/3dd9db348ade4ba6ad584b8fced3067e 2024-11-12T19:35:00,676 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/fef1b1ce473247f882bde422644223a6, entries=150, sequenceid=207, filesize=11.9 K 2024-11-12T19:35:00,680 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ce8b2c342e0c55d57c9696ce6e06a527 in 528ms, sequenceid=207, compaction requested=false 2024-11-12T19:35:00,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:00,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:00,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-11-12T19:35:00,681 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/C of ce8b2c342e0c55d57c9696ce6e06a527 into 3dd9db348ade4ba6ad584b8fced3067e(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
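Note: the flush/blocking cycle recorded here (~49 KB flushed per store while new writes are rejected at the 512 K limit) is governed by the region memstore sizing knobs. The following is a small, hypothetical configuration sketch showing how such a low blocking limit can arise; the values are illustrative assumptions chosen so that flush size times block multiplier equals the 512 K reported in this log, not the values this test actually set.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Flush a region's memstore once it reaches this size
    // (assumed 128 KB here; the production default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

    // Block new updates once the memstore reaches flush.size * multiplier.
    // 128 KB * 4 = 512 KB, matching the "Over memstore limit=512.0 K"
    // rejections in this log.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit (bytes): " + blockingLimit); // 524288
  }
}

Once that limit is hit, the region server answers Mutate calls with RegionTooBusyException until the in-flight flush (pid=125 here) and the follow-up compactions bring the memstore back down, which is the sequence the surrounding entries record.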
2024-11-12T19:35:00,681 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:00,681 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/C, priority=13, startTime=1731440100123; duration=0sec 2024-11-12T19:35:00,681 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:00,681 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:C 2024-11-12T19:35:00,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-11-12T19:35:00,688 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-12T19:35:00,689 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4490 sec 2024-11-12T19:35:00,690 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 1.4540 sec 2024-11-12T19:35:00,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:35:00,805 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-12T19:35:00,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:35:00,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:00,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:35:00,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:00,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:35:00,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:00,820 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/816e1bda02034b7799b2d631ba7c70bd is 50, key is test_row_0/A:col10/1731440100472/Put/seqid=0 2024-11-12T19:35:00,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742303_1479 (size=12151) 2024-11-12T19:35:00,879 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=223 (bloomFilter=true), 
to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/816e1bda02034b7799b2d631ba7c70bd 2024-11-12T19:35:00,904 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/bc701c157ede4f1b8798afdaedac9967 is 50, key is test_row_0/B:col10/1731440100472/Put/seqid=0 2024-11-12T19:35:00,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:00,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440160901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:00,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:00,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440160908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:00,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:00,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440160908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:00,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742304_1480 (size=12151) 2024-11-12T19:35:00,929 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=223 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/bc701c157ede4f1b8798afdaedac9967 2024-11-12T19:35:00,940 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/48e86c68637345dcbd84788fce036a87 is 50, key is test_row_0/C:col10/1731440100472/Put/seqid=0 2024-11-12T19:35:00,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742305_1481 (size=12151) 2024-11-12T19:35:01,022 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:01,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440161015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:01,022 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:01,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440161017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:01,022 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:01,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440161018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:01,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:01,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440161225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:01,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:01,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440161225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:01,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:01,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440161230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:01,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-12T19:35:01,342 INFO [Thread-1958 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-11-12T19:35:01,349 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:35:01,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-11-12T19:35:01,351 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:35:01,352 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:35:01,352 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:35:01,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-12T19:35:01,375 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=223 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/48e86c68637345dcbd84788fce036a87 2024-11-12T19:35:01,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/816e1bda02034b7799b2d631ba7c70bd as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/816e1bda02034b7799b2d631ba7c70bd 2024-11-12T19:35:01,382 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:01,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440161379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:01,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:01,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440161383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:01,387 DEBUG [Thread-1954 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., hostname=81d69e608036,33067,1731439956493, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:35:01,387 DEBUG [Thread-1948 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., hostname=81d69e608036,33067,1731439956493, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:35:01,390 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/816e1bda02034b7799b2d631ba7c70bd, entries=150, sequenceid=223, filesize=11.9 K 2024-11-12T19:35:01,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/bc701c157ede4f1b8798afdaedac9967 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/bc701c157ede4f1b8798afdaedac9967 2024-11-12T19:35:01,397 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/bc701c157ede4f1b8798afdaedac9967, entries=150, sequenceid=223, filesize=11.9 K 2024-11-12T19:35:01,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/48e86c68637345dcbd84788fce036a87 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/48e86c68637345dcbd84788fce036a87 2024-11-12T19:35:01,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/48e86c68637345dcbd84788fce036a87, entries=150, sequenceid=223, filesize=11.9 K 2024-11-12T19:35:01,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for ce8b2c342e0c55d57c9696ce6e06a527 in 603ms, sequenceid=223, compaction requested=true 2024-11-12T19:35:01,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 
2024-11-12T19:35:01,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:35:01,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:01,409 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:01,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:35:01,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:01,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:35:01,409 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:01,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:01,410 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:01,410 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/A is initiating minor compaction (all files) 2024-11-12T19:35:01,410 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/A in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:35:01,410 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:01,410 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/B is initiating minor compaction (all files) 2024-11-12T19:35:01,410 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/b2a1192be2794a83b7d6c072fac5f17b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/657081ffb3914ee3964423df517f54ba, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/816e1bda02034b7799b2d631ba7c70bd] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=36.0 K 2024-11-12T19:35:01,410 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/B in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:01,410 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/d3b33fc1d3e0417dae7efe5d2b941a58, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/7309e789ebe240728a963acceb68db51, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/bc701c157ede4f1b8798afdaedac9967] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=36.0 K 2024-11-12T19:35:01,410 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2a1192be2794a83b7d6c072fac5f17b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1731440099235 2024-11-12T19:35:01,410 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting d3b33fc1d3e0417dae7efe5d2b941a58, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1731440099235 2024-11-12T19:35:01,413 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 7309e789ebe240728a963acceb68db51, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1731440099283 2024-11-12T19:35:01,413 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 657081ffb3914ee3964423df517f54ba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1731440099283 2024-11-12T19:35:01,413 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] 
compactions.Compactor(224): Compacting bc701c157ede4f1b8798afdaedac9967, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1731440100472 2024-11-12T19:35:01,413 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 816e1bda02034b7799b2d631ba7c70bd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1731440100472 2024-11-12T19:35:01,429 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#A#compaction#405 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:01,429 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/d3b98170b7114d1a8df2332037bba8d0 is 50, key is test_row_0/A:col10/1731440100472/Put/seqid=0 2024-11-12T19:35:01,445 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#B#compaction#406 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:01,446 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/973e9be1089e4fc7aa564da0d08d6da4 is 50, key is test_row_0/B:col10/1731440100472/Put/seqid=0 2024-11-12T19:35:01,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742306_1482 (size=12697) 2024-11-12T19:35:01,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-12T19:35:01,471 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/d3b98170b7114d1a8df2332037bba8d0 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/d3b98170b7114d1a8df2332037bba8d0 2024-11-12T19:35:01,483 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/A of ce8b2c342e0c55d57c9696ce6e06a527 into d3b98170b7114d1a8df2332037bba8d0(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:35:01,483 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:01,483 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/A, priority=13, startTime=1731440101409; duration=0sec 2024-11-12T19:35:01,483 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:01,483 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:A 2024-11-12T19:35:01,483 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:01,484 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:01,484 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/C is initiating minor compaction (all files) 2024-11-12T19:35:01,484 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/C in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:01,484 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/3dd9db348ade4ba6ad584b8fced3067e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/fef1b1ce473247f882bde422644223a6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/48e86c68637345dcbd84788fce036a87] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=36.0 K 2024-11-12T19:35:01,485 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3dd9db348ade4ba6ad584b8fced3067e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1731440099235 2024-11-12T19:35:01,485 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting fef1b1ce473247f882bde422644223a6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1731440099283 2024-11-12T19:35:01,485 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48e86c68637345dcbd84788fce036a87, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1731440100472 2024-11-12T19:35:01,495 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#C#compaction#407 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:01,496 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/0b0170d01d0e4f5caa6e4307b7240fee is 50, key is test_row_0/C:col10/1731440100472/Put/seqid=0 2024-11-12T19:35:01,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742307_1483 (size=12697) 2024-11-12T19:35:01,505 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:01,505 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-12T19:35:01,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:01,506 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-12T19:35:01,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:35:01,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:01,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:35:01,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:01,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:35:01,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:01,512 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/973e9be1089e4fc7aa564da0d08d6da4 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/973e9be1089e4fc7aa564da0d08d6da4 2024-11-12T19:35:01,518 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/B of ce8b2c342e0c55d57c9696ce6e06a527 into 973e9be1089e4fc7aa564da0d08d6da4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:35:01,518 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:01,518 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/B, priority=13, startTime=1731440101409; duration=0sec 2024-11-12T19:35:01,518 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:01,518 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:B 2024-11-12T19:35:01,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/f332ea1d23b74b2d910a6586ccdc638a is 50, key is test_row_0/A:col10/1731440100906/Put/seqid=0 2024-11-12T19:35:01,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742309_1485 (size=12151) 2024-11-12T19:35:01,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:35:01,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:35:01,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742308_1484 (size=12697) 2024-11-12T19:35:01,553 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/0b0170d01d0e4f5caa6e4307b7240fee as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/0b0170d01d0e4f5caa6e4307b7240fee 2024-11-12T19:35:01,571 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/C of ce8b2c342e0c55d57c9696ce6e06a527 into 0b0170d01d0e4f5caa6e4307b7240fee(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:35:01,571 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:01,571 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/C, priority=13, startTime=1731440101409; duration=0sec 2024-11-12T19:35:01,571 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:01,571 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:C 2024-11-12T19:35:01,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:01,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:01,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440161574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:01,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440161577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:01,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:01,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440161584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:01,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-12T19:35:01,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:01,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440161686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:01,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:01,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440161689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:01,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:01,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440161694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:01,892 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:01,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440161889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:01,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:01,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440161892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:01,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:01,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440161899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:01,937 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/f332ea1d23b74b2d910a6586ccdc638a 2024-11-12T19:35:01,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/fec0fd41c66c48f7a3a142212be7dcf7 is 50, key is test_row_0/B:col10/1731440100906/Put/seqid=0 2024-11-12T19:35:01,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-12T19:35:01,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742310_1486 (size=12151) 2024-11-12T19:35:02,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:02,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440162198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:02,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:02,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440162204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:02,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:02,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440162211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:02,387 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/fec0fd41c66c48f7a3a142212be7dcf7 2024-11-12T19:35:02,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/252a9449f0354a29a7c73d0469827e5d is 50, key is test_row_0/C:col10/1731440100906/Put/seqid=0 2024-11-12T19:35:02,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742311_1487 (size=12151) 2024-11-12T19:35:02,459 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/252a9449f0354a29a7c73d0469827e5d 2024-11-12T19:35:02,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-12T19:35:02,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/f332ea1d23b74b2d910a6586ccdc638a as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/f332ea1d23b74b2d910a6586ccdc638a 2024-11-12T19:35:02,483 INFO 
[RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/f332ea1d23b74b2d910a6586ccdc638a, entries=150, sequenceid=247, filesize=11.9 K 2024-11-12T19:35:02,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/fec0fd41c66c48f7a3a142212be7dcf7 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/fec0fd41c66c48f7a3a142212be7dcf7 2024-11-12T19:35:02,491 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/fec0fd41c66c48f7a3a142212be7dcf7, entries=150, sequenceid=247, filesize=11.9 K 2024-11-12T19:35:02,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/252a9449f0354a29a7c73d0469827e5d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/252a9449f0354a29a7c73d0469827e5d 2024-11-12T19:35:02,498 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/252a9449f0354a29a7c73d0469827e5d, entries=150, sequenceid=247, filesize=11.9 K 2024-11-12T19:35:02,505 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for ce8b2c342e0c55d57c9696ce6e06a527 in 1000ms, sequenceid=247, compaction requested=false 2024-11-12T19:35:02,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:02,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:35:02,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-11-12T19:35:02,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-11-12T19:35:02,523 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-12T19:35:02,523 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1580 sec 2024-11-12T19:35:02,524 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 1.1730 sec 2024-11-12T19:35:02,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:35:02,714 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-12T19:35:02,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:35:02,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:02,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:35:02,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:02,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:35:02,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:02,722 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/e8d9b3ac42dc468e9db245adefd555d0 is 50, key is test_row_0/A:col10/1731440102713/Put/seqid=0 2024-11-12T19:35:02,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742312_1488 (size=12301) 2024-11-12T19:35:02,775 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:02,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440162765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:02,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:02,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:02,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440162776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:02,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440162776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:02,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:02,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440162883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:02,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:02,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440162889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:02,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:02,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440162890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:03,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:03,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440163097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:03,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:03,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440163097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:03,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:03,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440163098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:03,146 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/e8d9b3ac42dc468e9db245adefd555d0 2024-11-12T19:35:03,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/a0be45237e3e40fa87b77b0632ed28bc is 50, key is test_row_0/B:col10/1731440102713/Put/seqid=0 2024-11-12T19:35:03,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742313_1489 (size=12301) 2024-11-12T19:35:03,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/a0be45237e3e40fa87b77b0632ed28bc 2024-11-12T19:35:03,178 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/67d445a4581241f1bb3305ac02cf2496 is 50, key is test_row_0/C:col10/1731440102713/Put/seqid=0 2024-11-12T19:35:03,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742314_1490 (size=12301) 2024-11-12T19:35:03,186 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/67d445a4581241f1bb3305ac02cf2496 2024-11-12T19:35:03,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/e8d9b3ac42dc468e9db245adefd555d0 as 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/e8d9b3ac42dc468e9db245adefd555d0 2024-11-12T19:35:03,195 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/e8d9b3ac42dc468e9db245adefd555d0, entries=150, sequenceid=263, filesize=12.0 K 2024-11-12T19:35:03,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/a0be45237e3e40fa87b77b0632ed28bc as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/a0be45237e3e40fa87b77b0632ed28bc 2024-11-12T19:35:03,199 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/a0be45237e3e40fa87b77b0632ed28bc, entries=150, sequenceid=263, filesize=12.0 K 2024-11-12T19:35:03,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/67d445a4581241f1bb3305ac02cf2496 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/67d445a4581241f1bb3305ac02cf2496 2024-11-12T19:35:03,203 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/67d445a4581241f1bb3305ac02cf2496, entries=150, sequenceid=263, filesize=12.0 K 2024-11-12T19:35:03,204 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for ce8b2c342e0c55d57c9696ce6e06a527 in 489ms, sequenceid=263, compaction requested=true 2024-11-12T19:35:03,204 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:03,204 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:03,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:35:03,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:03,205 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:03,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:B, priority=-2147483648, current under compaction 
store size is 2 2024-11-12T19:35:03,205 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:03,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:03,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:35:03,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:03,205 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/A is initiating minor compaction (all files) 2024-11-12T19:35:03,205 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/A in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:03,205 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/d3b98170b7114d1a8df2332037bba8d0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/f332ea1d23b74b2d910a6586ccdc638a, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/e8d9b3ac42dc468e9db245adefd555d0] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=36.3 K 2024-11-12T19:35:03,206 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3b98170b7114d1a8df2332037bba8d0, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1731440100472 2024-11-12T19:35:03,207 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting f332ea1d23b74b2d910a6586ccdc638a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1731440100888 2024-11-12T19:35:03,207 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:03,207 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8d9b3ac42dc468e9db245adefd555d0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1731440101560 2024-11-12T19:35:03,207 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/B is initiating minor compaction (all files) 2024-11-12T19:35:03,207 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): 
Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/B in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:03,208 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/973e9be1089e4fc7aa564da0d08d6da4, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/fec0fd41c66c48f7a3a142212be7dcf7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/a0be45237e3e40fa87b77b0632ed28bc] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=36.3 K 2024-11-12T19:35:03,208 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 973e9be1089e4fc7aa564da0d08d6da4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1731440100472 2024-11-12T19:35:03,209 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting fec0fd41c66c48f7a3a142212be7dcf7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1731440100888 2024-11-12T19:35:03,209 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting a0be45237e3e40fa87b77b0632ed28bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1731440101560 2024-11-12T19:35:03,214 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#A#compaction#414 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:03,214 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/107e6e2c919f476b8fec23c6405d487d is 50, key is test_row_0/A:col10/1731440102713/Put/seqid=0 2024-11-12T19:35:03,221 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#B#compaction#415 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:03,221 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/38fd71ca15b44819b65b97d22482afd8 is 50, key is test_row_0/B:col10/1731440102713/Put/seqid=0 2024-11-12T19:35:03,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742315_1491 (size=12949) 2024-11-12T19:35:03,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742316_1492 (size=12949) 2024-11-12T19:35:03,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:35:03,416 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-12T19:35:03,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:35:03,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:03,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:35:03,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:03,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:35:03,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:03,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/0c72c4d174e341b4a7724a8fdad9f29e is 50, key is test_row_0/A:col10/1731440102774/Put/seqid=0 2024-11-12T19:35:03,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742317_1493 (size=17181) 2024-11-12T19:35:03,427 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/0c72c4d174e341b4a7724a8fdad9f29e 2024-11-12T19:35:03,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/145f4a2025034a9084028f7e36f7d2fc is 50, key is test_row_0/B:col10/1731440102774/Put/seqid=0 2024-11-12T19:35:03,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:03,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440163435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:03,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742318_1494 (size=12301) 2024-11-12T19:35:03,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:03,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440163439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:03,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:03,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440163440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:03,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-12T19:35:03,464 INFO [Thread-1958 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-12T19:35:03,465 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:35:03,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-11-12T19:35:03,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-12T19:35:03,466 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:35:03,467 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:35:03,467 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:35:03,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:03,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440163541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:03,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:03,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440163545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:03,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:03,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440163552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:03,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-12T19:35:03,618 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:03,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-12T19:35:03,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:03,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:35:03,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:03,618 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:03,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:03,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:03,629 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/107e6e2c919f476b8fec23c6405d487d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/107e6e2c919f476b8fec23c6405d487d 2024-11-12T19:35:03,629 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/38fd71ca15b44819b65b97d22482afd8 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/38fd71ca15b44819b65b97d22482afd8 2024-11-12T19:35:03,634 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/B of ce8b2c342e0c55d57c9696ce6e06a527 into 38fd71ca15b44819b65b97d22482afd8(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:35:03,635 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:03,635 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/B, priority=13, startTime=1731440103205; duration=0sec 2024-11-12T19:35:03,635 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:03,635 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:B 2024-11-12T19:35:03,635 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:03,635 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/A of ce8b2c342e0c55d57c9696ce6e06a527 into 107e6e2c919f476b8fec23c6405d487d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:35:03,635 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:03,635 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/A, priority=13, startTime=1731440103204; duration=0sec 2024-11-12T19:35:03,635 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:03,635 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:A 2024-11-12T19:35:03,636 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:03,636 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/C is initiating minor compaction (all files) 2024-11-12T19:35:03,636 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/C in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:03,636 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/0b0170d01d0e4f5caa6e4307b7240fee, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/252a9449f0354a29a7c73d0469827e5d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/67d445a4581241f1bb3305ac02cf2496] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=36.3 K 2024-11-12T19:35:03,636 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b0170d01d0e4f5caa6e4307b7240fee, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1731440100472 2024-11-12T19:35:03,636 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 252a9449f0354a29a7c73d0469827e5d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1731440100888 2024-11-12T19:35:03,637 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 67d445a4581241f1bb3305ac02cf2496, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1731440101560 2024-11-12T19:35:03,643 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#C#compaction#418 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:03,644 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/3df723f3306c49ebbe52a35aed6ce5e5 is 50, key is test_row_0/C:col10/1731440102713/Put/seqid=0 2024-11-12T19:35:03,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742319_1495 (size=12949) 2024-11-12T19:35:03,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:03,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440163750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:03,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:03,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440163751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:03,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-12T19:35:03,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:03,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440163763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:03,770 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:03,770 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-12T19:35:03,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:03,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:35:03,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:03,770 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:35:03,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:03,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:03,844 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/145f4a2025034a9084028f7e36f7d2fc 2024-11-12T19:35:03,855 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/e171967c8ab948b48acf30e5ee4fd287 is 50, key is test_row_0/C:col10/1731440102774/Put/seqid=0 2024-11-12T19:35:03,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742320_1496 (size=12301) 2024-11-12T19:35:03,927 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:03,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-12T19:35:03,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:03,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:35:03,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:03,929 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:35:03,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:03,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:04,004 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-12T19:35:04,056 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/3df723f3306c49ebbe52a35aed6ce5e5 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/3df723f3306c49ebbe52a35aed6ce5e5 2024-11-12T19:35:04,061 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/C of ce8b2c342e0c55d57c9696ce6e06a527 into 3df723f3306c49ebbe52a35aed6ce5e5(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:35:04,061 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:04,061 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/C, priority=13, startTime=1731440103205; duration=0sec 2024-11-12T19:35:04,062 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:04,062 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:C 2024-11-12T19:35:04,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:04,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440164057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:04,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:04,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440164063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:04,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-12T19:35:04,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:04,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440164075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:04,092 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:04,092 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-12T19:35:04,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:04,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:35:04,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:04,093 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:04,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:04,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:04,245 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:04,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-12T19:35:04,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:04,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:35:04,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:04,245 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:04,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:04,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:35:04,269 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/e171967c8ab948b48acf30e5ee4fd287 2024-11-12T19:35:04,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/0c72c4d174e341b4a7724a8fdad9f29e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/0c72c4d174e341b4a7724a8fdad9f29e 2024-11-12T19:35:04,304 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/0c72c4d174e341b4a7724a8fdad9f29e, entries=250, sequenceid=288, filesize=16.8 K 2024-11-12T19:35:04,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/145f4a2025034a9084028f7e36f7d2fc as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/145f4a2025034a9084028f7e36f7d2fc 2024-11-12T19:35:04,309 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/145f4a2025034a9084028f7e36f7d2fc, entries=150, sequenceid=288, filesize=12.0 K 2024-11-12T19:35:04,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/e171967c8ab948b48acf30e5ee4fd287 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/e171967c8ab948b48acf30e5ee4fd287 2024-11-12T19:35:04,315 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/e171967c8ab948b48acf30e5ee4fd287, entries=150, sequenceid=288, filesize=12.0 K 2024-11-12T19:35:04,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ce8b2c342e0c55d57c9696ce6e06a527 in 906ms, sequenceid=288, compaction requested=false 2024-11-12T19:35:04,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:04,397 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:04,398 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=129 2024-11-12T19:35:04,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:04,398 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-12T19:35:04,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:35:04,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:04,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:35:04,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:04,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:35:04,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:04,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/174a6946fe8d41178d8b108ff70b87db is 50, key is test_row_0/A:col10/1731440103433/Put/seqid=0 2024-11-12T19:35:04,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742321_1497 (size=12301) 2024-11-12T19:35:04,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-12T19:35:04,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:35:04,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:35:04,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:04,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440164626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:04,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:04,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440164632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:04,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:04,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440164633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:04,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:04,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440164734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:04,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:04,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440164740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:04,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:04,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440164745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:04,831 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/174a6946fe8d41178d8b108ff70b87db 2024-11-12T19:35:04,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/701ac1cb331a45c3838363f044ab30f5 is 50, key is test_row_0/B:col10/1731440103433/Put/seqid=0 2024-11-12T19:35:04,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742322_1498 (size=12301) 2024-11-12T19:35:04,842 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/701ac1cb331a45c3838363f044ab30f5 2024-11-12T19:35:04,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/b4a86712fe7f4165bdb1926577672249 is 50, key is test_row_0/C:col10/1731440103433/Put/seqid=0 2024-11-12T19:35:04,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742323_1499 (size=12301) 2024-11-12T19:35:04,882 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/b4a86712fe7f4165bdb1926577672249 2024-11-12T19:35:04,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/174a6946fe8d41178d8b108ff70b87db as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/174a6946fe8d41178d8b108ff70b87db 2024-11-12T19:35:04,908 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/174a6946fe8d41178d8b108ff70b87db, entries=150, sequenceid=302, filesize=12.0 K 2024-11-12T19:35:04,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/701ac1cb331a45c3838363f044ab30f5 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/701ac1cb331a45c3838363f044ab30f5 2024-11-12T19:35:04,913 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/701ac1cb331a45c3838363f044ab30f5, entries=150, sequenceid=302, filesize=12.0 K 2024-11-12T19:35:04,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/b4a86712fe7f4165bdb1926577672249 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/b4a86712fe7f4165bdb1926577672249 2024-11-12T19:35:04,917 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/b4a86712fe7f4165bdb1926577672249, entries=150, sequenceid=302, filesize=12.0 K 2024-11-12T19:35:04,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-12T19:35:04,918 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for ce8b2c342e0c55d57c9696ce6e06a527 in 520ms, sequenceid=302, compaction requested=true 2024-11-12T19:35:04,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:04,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:04,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-11-12T19:35:04,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-11-12T19:35:04,921 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-12T19:35:04,921 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4520 sec 2024-11-12T19:35:04,922 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.4570 sec 2024-11-12T19:35:04,947 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-12T19:35:04,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:35:04,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:04,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:35:04,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:04,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:35:04,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:04,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:35:04,962 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/057b20789289480da1ee9e269fdcedd7 is 50, key is test_row_0/A:col10/1731440104945/Put/seqid=0 2024-11-12T19:35:04,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742324_1500 (size=14741) 2024-11-12T19:35:05,006 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:05,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440164999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:05,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:05,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440164999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:05,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:05,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440165000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:05,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:05,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440165107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:05,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:05,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440165108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:05,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:05,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440165108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:05,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:05,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440165314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:05,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:05,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440165315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:05,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:05,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440165315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:05,382 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/057b20789289480da1ee9e269fdcedd7 2024-11-12T19:35:05,396 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:05,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56744 deadline: 1731440165393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:05,397 DEBUG [Thread-1954 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8182 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., hostname=81d69e608036,33067,1731439956493, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:35:05,404 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/d94a9a8caaa849dc8527d8b353f19ea3 is 50, key is test_row_0/B:col10/1731440104945/Put/seqid=0 2024-11-12T19:35:05,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742325_1501 (size=12301) 2024-11-12T19:35:05,419 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 
KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/d94a9a8caaa849dc8527d8b353f19ea3 2024-11-12T19:35:05,429 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/a9cf60d4e9c9454ea635290273da7617 is 50, key is test_row_0/C:col10/1731440104945/Put/seqid=0 2024-11-12T19:35:05,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:05,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56710 deadline: 1731440165427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:05,431 DEBUG [Thread-1948 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8215 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., hostname=81d69e608036,33067,1731439956493, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:35:05,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742326_1502 (size=12301) 2024-11-12T19:35:05,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-12T19:35:05,577 INFO [Thread-1958 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-11-12T19:35:05,579 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:35:05,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-12T19:35:05,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-12T19:35:05,588 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:35:05,590 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:35:05,590 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:35:05,629 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:05,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440165623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:05,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:05,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440165627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:05,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:05,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440165630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:05,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-12T19:35:05,751 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:05,751 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-12T19:35:05,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:05,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:35:05,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:05,752 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:05,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:05,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:05,851 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/a9cf60d4e9c9454ea635290273da7617 2024-11-12T19:35:05,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/057b20789289480da1ee9e269fdcedd7 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/057b20789289480da1ee9e269fdcedd7 2024-11-12T19:35:05,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-12T19:35:05,891 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/057b20789289480da1ee9e269fdcedd7, entries=200, sequenceid=329, filesize=14.4 K 2024-11-12T19:35:05,900 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/d94a9a8caaa849dc8527d8b353f19ea3 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/d94a9a8caaa849dc8527d8b353f19ea3 2024-11-12T19:35:05,909 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:05,910 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-12T19:35:05,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:05,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
as already flushing 2024-11-12T19:35:05,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:05,911 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:05,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:05,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:05,915 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/d94a9a8caaa849dc8527d8b353f19ea3, entries=150, sequenceid=329, filesize=12.0 K 2024-11-12T19:35:05,923 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/a9cf60d4e9c9454ea635290273da7617 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/a9cf60d4e9c9454ea635290273da7617 2024-11-12T19:35:05,931 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/a9cf60d4e9c9454ea635290273da7617, entries=150, sequenceid=329, filesize=12.0 K 2024-11-12T19:35:05,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for ce8b2c342e0c55d57c9696ce6e06a527 in 996ms, sequenceid=329, compaction requested=true 2024-11-12T19:35:05,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:05,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:35:05,943 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:05,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:35:05,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-12T19:35:05,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:35:05,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-12T19:35:05,945 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:35:05,946 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:35:05,947 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 57172 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:35:05,947 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/A is initiating minor compaction (all files) 2024-11-12T19:35:05,947 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/A in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
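The RegionTooBusyException warnings scattered through this section ("Over memstore limit=512.0 K") are raised by HRegion.checkResources once a region's memstore exceeds its blocking limit, which is the configured flush size multiplied by the block multiplier. A minimal sketch of that arithmetic, assuming the test lowers hbase.hregion.memstore.flush.size to 128 K (an assumed value; with the default multiplier of 4 it reproduces the 512 K limit reported above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test override; the stock default flush size is 128 MB.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    // Writes against the region are rejected with RegionTooBusyException above this size
    // until a flush brings the memstore back down.
    long blockingLimit = flushSize * multiplier;
    System.out.println("Per-region blocking memstore limit: " + blockingLimit + " bytes"); // 524288 = 512.0 K
  }
}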
2024-11-12T19:35:05,947 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/107e6e2c919f476b8fec23c6405d487d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/0c72c4d174e341b4a7724a8fdad9f29e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/174a6946fe8d41178d8b108ff70b87db, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/057b20789289480da1ee9e269fdcedd7] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=55.8 K 2024-11-12T19:35:05,948 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 107e6e2c919f476b8fec23c6405d487d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1731440101560 2024-11-12T19:35:05,948 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c72c4d174e341b4a7724a8fdad9f29e, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731440102763 2024-11-12T19:35:05,948 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 174a6946fe8d41178d8b108ff70b87db, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1731440103433 2024-11-12T19:35:05,948 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 057b20789289480da1ee9e269fdcedd7, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1731440104607 2024-11-12T19:35:05,953 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:35:05,953 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/C is initiating minor compaction (all files) 2024-11-12T19:35:05,953 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/C in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
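The ExploringCompactionPolicy line above reports that all four A-family files ("4 files of size 57172 ... 3 in ratio") qualify for a single minor compaction. A simplified sketch of the size-ratio test behind that decision, assuming the default hbase.hstore.compaction.ratio of 1.2 (the real policy also enforces minimum/maximum file counts and explores several permutations, as the "3 permutations" wording indicates):

public class CompactionRatioCheck {
  // A file stays "in ratio" if it is no larger than ratio * (sum of the other files).
  static boolean filesInRatio(long[] sizes, double ratio) {
    long total = 0;
    for (long s : sizes) total += s;
    for (long s : sizes) {
      if (s > (total - s) * ratio) return false; // an oversized file would block this selection
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate byte sizes of the four A-family files listed above:
    // 12.6 K, 16.8 K, 12.0 K and 14.4 K (~55.8 K total); values rounded for illustration.
    long[] sizes = {12902, 17203, 12288, 14745};
    System.out.println(filesInRatio(sizes, 1.2)); // true: the whole set is eligible
  }
}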
2024-11-12T19:35:05,953 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/3df723f3306c49ebbe52a35aed6ce5e5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/e171967c8ab948b48acf30e5ee4fd287, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/b4a86712fe7f4165bdb1926577672249, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/a9cf60d4e9c9454ea635290273da7617] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=48.7 K 2024-11-12T19:35:05,954 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3df723f3306c49ebbe52a35aed6ce5e5, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1731440101560 2024-11-12T19:35:05,954 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting e171967c8ab948b48acf30e5ee4fd287, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731440102774 2024-11-12T19:35:05,955 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4a86712fe7f4165bdb1926577672249, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1731440103433 2024-11-12T19:35:05,955 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting a9cf60d4e9c9454ea635290273da7617, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1731440104607 2024-11-12T19:35:05,964 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#A#compaction#426 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:05,965 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/191cad47466a40b786d42da9fe71cc13 is 50, key is test_row_0/A:col10/1731440104945/Put/seqid=0 2024-11-12T19:35:05,969 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#C#compaction#427 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:05,970 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/8c1e31a5e72a40fba03913c540eb7229 is 50, key is test_row_0/C:col10/1731440104945/Put/seqid=0 2024-11-12T19:35:05,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742328_1504 (size=13085) 2024-11-12T19:35:06,003 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/8c1e31a5e72a40fba03913c540eb7229 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/8c1e31a5e72a40fba03913c540eb7229 2024-11-12T19:35:06,007 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/C of ce8b2c342e0c55d57c9696ce6e06a527 into 8c1e31a5e72a40fba03913c540eb7229(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:35:06,007 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:06,007 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/C, priority=12, startTime=1731440105943; duration=0sec 2024-11-12T19:35:06,007 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:06,007 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:C 2024-11-12T19:35:06,007 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:35:06,008 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:35:06,008 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/B is initiating minor compaction (all files) 2024-11-12T19:35:06,008 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/B in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
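The C- and B-store compactions above were queued by the flusher itself, but minor and major compactions can also be requested and observed through the client Admin API. A hedged sketch of that (connection settings and the polling interval are placeholders, and the CompactionState accessor shown is assumed to match the 2.x Admin interface):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.compact(table); // asynchronous request for a minor compaction
      // Poll until the region servers report no compaction in progress for the table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);
      }
    }
  }
}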
2024-11-12T19:35:06,008 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/38fd71ca15b44819b65b97d22482afd8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/145f4a2025034a9084028f7e36f7d2fc, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/701ac1cb331a45c3838363f044ab30f5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/d94a9a8caaa849dc8527d8b353f19ea3] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=48.7 K 2024-11-12T19:35:06,009 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38fd71ca15b44819b65b97d22482afd8, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1731440101560 2024-11-12T19:35:06,009 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 145f4a2025034a9084028f7e36f7d2fc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731440102774 2024-11-12T19:35:06,009 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 701ac1cb331a45c3838363f044ab30f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1731440103433 2024-11-12T19:35:06,009 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting d94a9a8caaa849dc8527d8b353f19ea3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1731440104607 2024-11-12T19:35:06,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742327_1503 (size=13085) 2024-11-12T19:35:06,020 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#B#compaction#428 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:06,020 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/859af1ee7d104ea3ae101e8553b1a9f8 is 50, key is test_row_0/B:col10/1731440104945/Put/seqid=0 2024-11-12T19:35:06,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742329_1505 (size=13085) 2024-11-12T19:35:06,043 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/859af1ee7d104ea3ae101e8553b1a9f8 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/859af1ee7d104ea3ae101e8553b1a9f8 2024-11-12T19:35:06,049 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/B of ce8b2c342e0c55d57c9696ce6e06a527 into 859af1ee7d104ea3ae101e8553b1a9f8(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:35:06,049 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:06,049 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/B, priority=12, startTime=1731440105943; duration=0sec 2024-11-12T19:35:06,049 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:06,049 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:B 2024-11-12T19:35:06,067 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:06,067 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-12T19:35:06,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
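The FlushTableProcedure (pid=130) and its FlushRegionProcedure subprocedure (pid=131) tracked throughout this section are created by the master when a client asks for a table flush; the repeated "Checking to see if procedure is done" lines are that request being polled to completion. A minimal sketch of issuing the flush through the Admin API (connection configuration omitted and assumed to point at the test cluster):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master reports the flush procedure finished, which is what
      // produces lines like "Operation: FLUSH, Table Name: default:TestAcidGuarantees ... completed".
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}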
2024-11-12T19:35:06,067 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-12T19:35:06,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:35:06,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:06,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:35:06,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:06,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:35:06,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:06,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/9b82603d071043d58f38c02d2a9ae72d is 50, key is test_row_0/A:col10/1731440104997/Put/seqid=0 2024-11-12T19:35:06,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742330_1506 (size=12301) 2024-11-12T19:35:06,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:35:06,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:35:06,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-12T19:35:06,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:06,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440166203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:06,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:06,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440166212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:06,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:06,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440166213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:06,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:06,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440166319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:06,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:06,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440166321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:06,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:06,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440166325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:06,427 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/191cad47466a40b786d42da9fe71cc13 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/191cad47466a40b786d42da9fe71cc13 2024-11-12T19:35:06,433 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/A of ce8b2c342e0c55d57c9696ce6e06a527 into 191cad47466a40b786d42da9fe71cc13(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
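Each "Committing ... .tmp/... as ..." line above shows the same pattern: flush and compaction output is written under the region's .tmp directory and only then moved into the column-family directory, so readers never observe a partially written HFile. A minimal sketch of that write-then-rename pattern against HDFS (paths and payload are illustrative, not the test's actual layout or HBase's own HRegionFileSystem code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitTmpFile {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // fs.defaultFS would be hdfs://localhost:41367 in this test run; left to the default config here.
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path("/data/default/TestAcidGuarantees/region/.tmp/A/newfile");
    Path dst = new Path("/data/default/TestAcidGuarantees/region/A/newfile");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeBytes("placeholder for HFile contents"); // not a real HFile
    }
    // The rename is a single metadata operation, so the store directory either
    // contains the whole file or nothing at all.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("commit failed: " + tmp + " -> " + dst);
    }
  }
}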
2024-11-12T19:35:06,433 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:06,433 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/A, priority=12, startTime=1731440105943; duration=0sec 2024-11-12T19:35:06,433 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:06,433 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:A 2024-11-12T19:35:06,490 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/9b82603d071043d58f38c02d2a9ae72d 2024-11-12T19:35:06,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/dedda3cb3db44baf9bc8f3f9db5e1ac9 is 50, key is test_row_0/B:col10/1731440104997/Put/seqid=0 2024-11-12T19:35:06,528 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:06,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440166525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:06,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742331_1507 (size=12301) 2024-11-12T19:35:06,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:06,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440166534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:06,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:06,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440166542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:06,548 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/dedda3cb3db44baf9bc8f3f9db5e1ac9 2024-11-12T19:35:06,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/31b24cfb42974b3c89d8a216986517e7 is 50, key is test_row_0/C:col10/1731440104997/Put/seqid=0 2024-11-12T19:35:06,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742332_1508 (size=12301) 2024-11-12T19:35:06,589 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/31b24cfb42974b3c89d8a216986517e7 2024-11-12T19:35:06,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/9b82603d071043d58f38c02d2a9ae72d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/9b82603d071043d58f38c02d2a9ae72d 2024-11-12T19:35:06,605 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 
{event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/9b82603d071043d58f38c02d2a9ae72d, entries=150, sequenceid=340, filesize=12.0 K 2024-11-12T19:35:06,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/dedda3cb3db44baf9bc8f3f9db5e1ac9 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/dedda3cb3db44baf9bc8f3f9db5e1ac9 2024-11-12T19:35:06,610 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/dedda3cb3db44baf9bc8f3f9db5e1ac9, entries=150, sequenceid=340, filesize=12.0 K 2024-11-12T19:35:06,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/31b24cfb42974b3c89d8a216986517e7 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/31b24cfb42974b3c89d8a216986517e7 2024-11-12T19:35:06,618 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/31b24cfb42974b3c89d8a216986517e7, entries=150, sequenceid=340, filesize=12.0 K 2024-11-12T19:35:06,621 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for ce8b2c342e0c55d57c9696ce6e06a527 in 554ms, sequenceid=340, compaction requested=false 2024-11-12T19:35:06,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:06,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:35:06,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-11-12T19:35:06,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-11-12T19:35:06,633 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-12T19:35:06,633 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0320 sec 2024-11-12T19:35:06,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 1.0540 sec 2024-11-12T19:35:06,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-12T19:35:06,690 INFO [Thread-1958 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-12T19:35:06,691 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:35:06,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-12T19:35:06,693 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:35:06,694 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:35:06,694 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:35:06,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-12T19:35:06,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-12T19:35:06,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:35:06,842 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-12T19:35:06,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:35:06,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:06,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:35:06,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:06,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:35:06,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:06,846 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:06,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-12T19:35:06,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:06,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:35:06,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:06,847 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:06,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:06,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:35:06,865 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/f67d2acbea914c9c86e216c85b448e2c is 50, key is test_row_0/A:col10/1731440106199/Put/seqid=0 2024-11-12T19:35:06,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:06,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440166853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:06,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:06,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440166862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:06,876 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:06,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440166867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:06,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742333_1509 (size=14741) 2024-11-12T19:35:06,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:06,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:06,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440166969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:06,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440166969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:06,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:06,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440166977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:07,004 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:07,009 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-12T19:35:07,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:07,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:35:07,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:07,010 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:07,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:07,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:07,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-12T19:35:07,167 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:07,167 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-12T19:35:07,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:07,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:35:07,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:07,167 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:07,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:07,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:07,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:07,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440167181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:07,194 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:07,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440167187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:07,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:07,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440167189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:07,294 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/f67d2acbea914c9c86e216c85b448e2c 2024-11-12T19:35:07,322 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:07,323 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-12T19:35:07,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:07,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:35:07,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:07,323 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:07,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:07,324 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/57a4d7120f27406c8085f7e096a72d9c is 50, key is test_row_0/B:col10/1731440106199/Put/seqid=0 2024-11-12T19:35:07,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:07,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-12T19:35:07,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742334_1510 (size=12301) 2024-11-12T19:35:07,369 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/57a4d7120f27406c8085f7e096a72d9c 2024-11-12T19:35:07,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/0008c1c407f5421898c26ce3ecbe17e0 is 50, key is test_row_0/C:col10/1731440106199/Put/seqid=0 2024-11-12T19:35:07,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742335_1511 (size=12301) 2024-11-12T19:35:07,451 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/0008c1c407f5421898c26ce3ecbe17e0 2024-11-12T19:35:07,478 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:07,482 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-12T19:35:07,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:07,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:35:07,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:07,483 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:07,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:35:07,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:07,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/f67d2acbea914c9c86e216c85b448e2c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/f67d2acbea914c9c86e216c85b448e2c 2024-11-12T19:35:07,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:07,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440167483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:07,490 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/f67d2acbea914c9c86e216c85b448e2c, entries=200, sequenceid=369, filesize=14.4 K 2024-11-12T19:35:07,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/57a4d7120f27406c8085f7e096a72d9c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/57a4d7120f27406c8085f7e096a72d9c 2024-11-12T19:35:07,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:07,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440167496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:07,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:07,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440167498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:07,504 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/57a4d7120f27406c8085f7e096a72d9c, entries=150, sequenceid=369, filesize=12.0 K 2024-11-12T19:35:07,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/0008c1c407f5421898c26ce3ecbe17e0 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/0008c1c407f5421898c26ce3ecbe17e0 2024-11-12T19:35:07,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/0008c1c407f5421898c26ce3ecbe17e0, entries=150, sequenceid=369, filesize=12.0 K 2024-11-12T19:35:07,524 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for ce8b2c342e0c55d57c9696ce6e06a527 in 683ms, sequenceid=369, compaction requested=true 2024-11-12T19:35:07,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:07,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:35:07,524 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:07,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:07,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:35:07,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), 
splitQueue=0 2024-11-12T19:35:07,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:35:07,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-12T19:35:07,524 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:07,525 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40127 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:07,526 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/A is initiating minor compaction (all files) 2024-11-12T19:35:07,526 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/A in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:07,526 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/191cad47466a40b786d42da9fe71cc13, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/9b82603d071043d58f38c02d2a9ae72d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/f67d2acbea914c9c86e216c85b448e2c] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=39.2 K 2024-11-12T19:35:07,526 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:07,526 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/B is initiating minor compaction (all files) 2024-11-12T19:35:07,526 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/B in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:35:07,526 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/859af1ee7d104ea3ae101e8553b1a9f8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/dedda3cb3db44baf9bc8f3f9db5e1ac9, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/57a4d7120f27406c8085f7e096a72d9c] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=36.8 K 2024-11-12T19:35:07,527 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 859af1ee7d104ea3ae101e8553b1a9f8, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1731440104607 2024-11-12T19:35:07,527 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 191cad47466a40b786d42da9fe71cc13, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1731440104607 2024-11-12T19:35:07,532 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting dedda3cb3db44baf9bc8f3f9db5e1ac9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1731440104997 2024-11-12T19:35:07,532 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b82603d071043d58f38c02d2a9ae72d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1731440104997 2024-11-12T19:35:07,534 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 57a4d7120f27406c8085f7e096a72d9c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1731440106199 2024-11-12T19:35:07,534 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting f67d2acbea914c9c86e216c85b448e2c, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1731440106199 2024-11-12T19:35:07,574 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#B#compaction#435 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:07,574 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/da79ec24855045d1a46337cc9c174a9b is 50, key is test_row_0/B:col10/1731440106199/Put/seqid=0 2024-11-12T19:35:07,577 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#A#compaction#436 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:07,577 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/ab872127a8614189a8d3cb19bf9abdc5 is 50, key is test_row_0/A:col10/1731440106199/Put/seqid=0 2024-11-12T19:35:07,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742336_1512 (size=13187) 2024-11-12T19:35:07,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742337_1513 (size=13187) 2024-11-12T19:35:07,631 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/ab872127a8614189a8d3cb19bf9abdc5 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/ab872127a8614189a8d3cb19bf9abdc5 2024-11-12T19:35:07,637 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:07,637 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-12T19:35:07,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:35:07,637 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-12T19:35:07,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:35:07,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:07,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:35:07,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:07,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:35:07,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:07,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/09327af6a1e847538d88f73f5821a723 is 50, key is test_row_0/A:col10/1731440106854/Put/seqid=0 2024-11-12T19:35:07,663 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/A of ce8b2c342e0c55d57c9696ce6e06a527 into ab872127a8614189a8d3cb19bf9abdc5(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:35:07,664 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:07,664 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/A, priority=13, startTime=1731440107524; duration=0sec 2024-11-12T19:35:07,664 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:07,664 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:A 2024-11-12T19:35:07,664 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:07,665 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:07,665 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/C is initiating minor compaction (all files) 2024-11-12T19:35:07,665 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/C in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:07,665 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/8c1e31a5e72a40fba03913c540eb7229, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/31b24cfb42974b3c89d8a216986517e7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/0008c1c407f5421898c26ce3ecbe17e0] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=36.8 K 2024-11-12T19:35:07,665 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c1e31a5e72a40fba03913c540eb7229, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1731440104607 2024-11-12T19:35:07,665 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 31b24cfb42974b3c89d8a216986517e7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1731440104997 2024-11-12T19:35:07,666 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0008c1c407f5421898c26ce3ecbe17e0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1731440106199 2024-11-12T19:35:07,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43501 is added to blk_1073742338_1514 (size=12301) 2024-11-12T19:35:07,681 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#C#compaction#438 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:07,681 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/f88baf8661db432cbdb82210a742efc0 is 50, key is test_row_0/C:col10/1731440106199/Put/seqid=0 2024-11-12T19:35:07,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742339_1515 (size=13187) 2024-11-12T19:35:07,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-12T19:35:08,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:35:08,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:35:08,060 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/da79ec24855045d1a46337cc9c174a9b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/da79ec24855045d1a46337cc9c174a9b 2024-11-12T19:35:08,083 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/B of ce8b2c342e0c55d57c9696ce6e06a527 into da79ec24855045d1a46337cc9c174a9b(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:35:08,083 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:08,083 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/B, priority=13, startTime=1731440107524; duration=0sec 2024-11-12T19:35:08,084 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:08,084 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:B 2024-11-12T19:35:08,085 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/09327af6a1e847538d88f73f5821a723 2024-11-12T19:35:08,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:08,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440168090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:08,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:08,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440168091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:08,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:08,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440168094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:08,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/98fd1df263934a3488e72668d32230cb is 50, key is test_row_0/B:col10/1731440106854/Put/seqid=0 2024-11-12T19:35:08,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742340_1516 (size=12301) 2024-11-12T19:35:08,151 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/f88baf8661db432cbdb82210a742efc0 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/f88baf8661db432cbdb82210a742efc0 2024-11-12T19:35:08,151 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/98fd1df263934a3488e72668d32230cb 2024-11-12T19:35:08,196 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/C of ce8b2c342e0c55d57c9696ce6e06a527 into f88baf8661db432cbdb82210a742efc0(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:35:08,196 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:08,196 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/C, priority=13, startTime=1731440107524; duration=0sec 2024-11-12T19:35:08,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:08,196 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:08,196 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:C 2024-11-12T19:35:08,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440168196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:08,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/4a7f2fa56ba34d5192490d3365e16c91 is 50, key is test_row_0/C:col10/1731440106854/Put/seqid=0 2024-11-12T19:35:08,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:08,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440168208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:08,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:08,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440168208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:08,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742341_1517 (size=12301) 2024-11-12T19:35:08,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:08,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440168404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:08,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:08,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440168428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:08,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:08,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440168429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:08,635 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/4a7f2fa56ba34d5192490d3365e16c91 2024-11-12T19:35:08,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/09327af6a1e847538d88f73f5821a723 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/09327af6a1e847538d88f73f5821a723 2024-11-12T19:35:08,693 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/09327af6a1e847538d88f73f5821a723, entries=150, sequenceid=377, filesize=12.0 K 2024-11-12T19:35:08,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/98fd1df263934a3488e72668d32230cb as 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/98fd1df263934a3488e72668d32230cb 2024-11-12T19:35:08,707 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/98fd1df263934a3488e72668d32230cb, entries=150, sequenceid=377, filesize=12.0 K 2024-11-12T19:35:08,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/4a7f2fa56ba34d5192490d3365e16c91 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/4a7f2fa56ba34d5192490d3365e16c91 2024-11-12T19:35:08,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:08,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440168708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:08,723 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/4a7f2fa56ba34d5192490d3365e16c91, entries=150, sequenceid=377, filesize=12.0 K 2024-11-12T19:35:08,726 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for ce8b2c342e0c55d57c9696ce6e06a527 in 1089ms, sequenceid=377, compaction requested=false 2024-11-12T19:35:08,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:08,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:35:08,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-12T19:35:08,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-12T19:35:08,729 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-12T19:35:08,729 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0330 sec 2024-11-12T19:35:08,731 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 2.0390 sec 2024-11-12T19:35:08,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:35:08,741 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-11-12T19:35:08,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:35:08,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:08,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:35:08,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:08,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:35:08,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:08,751 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/b580711c2192459e9a68df87ae25501f is 50, key is test_row_0/A:col10/1731440108738/Put/seqid=0 2024-11-12T19:35:08,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440168757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:08,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440168758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:08,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742342_1518 (size=12301) 2024-11-12T19:35:08,784 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/b580711c2192459e9a68df87ae25501f 2024-11-12T19:35:08,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/346e9c33848e42cba9ff050ed7102454 is 50, key is test_row_0/B:col10/1731440108738/Put/seqid=0 2024-11-12T19:35:08,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742343_1519 (size=12301) 2024-11-12T19:35:08,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-12T19:35:08,839 INFO [Thread-1958 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-12T19:35:08,839 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/346e9c33848e42cba9ff050ed7102454 2024-11-12T19:35:08,841 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:35:08,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-12T19:35:08,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-12T19:35:08,849 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:35:08,849 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:35:08,850 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:35:08,860 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/6c4d4e583b0144b9acb84825270725b3 is 50, key is test_row_0/C:col10/1731440108738/Put/seqid=0 2024-11-12T19:35:08,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:08,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440168873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:08,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:08,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440168874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:08,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742344_1520 (size=12301) 2024-11-12T19:35:08,910 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/6c4d4e583b0144b9acb84825270725b3 2024-11-12T19:35:08,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/b580711c2192459e9a68df87ae25501f as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/b580711c2192459e9a68df87ae25501f 2024-11-12T19:35:08,935 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/b580711c2192459e9a68df87ae25501f, entries=150, sequenceid=410, filesize=12.0 K 2024-11-12T19:35:08,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/346e9c33848e42cba9ff050ed7102454 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/346e9c33848e42cba9ff050ed7102454 2024-11-12T19:35:08,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-12T19:35:08,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/346e9c33848e42cba9ff050ed7102454, entries=150, sequenceid=410, filesize=12.0 K 2024-11-12T19:35:08,956 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/6c4d4e583b0144b9acb84825270725b3 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/6c4d4e583b0144b9acb84825270725b3 2024-11-12T19:35:08,965 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/6c4d4e583b0144b9acb84825270725b3, entries=150, sequenceid=410, filesize=12.0 K 2024-11-12T19:35:08,966 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for ce8b2c342e0c55d57c9696ce6e06a527 in 225ms, sequenceid=410, compaction requested=true 2024-11-12T19:35:08,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:08,966 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:08,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:35:08,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:08,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:35:08,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:08,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:35:08,973 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:08,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:08,973 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:08,973 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/A is initiating minor compaction (all files) 2024-11-12T19:35:08,973 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/A in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:35:08,973 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/ab872127a8614189a8d3cb19bf9abdc5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/09327af6a1e847538d88f73f5821a723, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/b580711c2192459e9a68df87ae25501f] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=36.9 K 2024-11-12T19:35:08,975 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab872127a8614189a8d3cb19bf9abdc5, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1731440106199 2024-11-12T19:35:08,976 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 09327af6a1e847538d88f73f5821a723, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1731440106843 2024-11-12T19:35:08,976 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting b580711c2192459e9a68df87ae25501f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1731440108089 2024-11-12T19:35:08,976 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:08,976 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/B is initiating minor compaction (all files) 2024-11-12T19:35:08,977 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/B in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:35:08,977 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/da79ec24855045d1a46337cc9c174a9b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/98fd1df263934a3488e72668d32230cb, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/346e9c33848e42cba9ff050ed7102454] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=36.9 K 2024-11-12T19:35:08,978 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting da79ec24855045d1a46337cc9c174a9b, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1731440106199 2024-11-12T19:35:08,978 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 98fd1df263934a3488e72668d32230cb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1731440106843 2024-11-12T19:35:08,979 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 346e9c33848e42cba9ff050ed7102454, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1731440108089 2024-11-12T19:35:09,003 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:09,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-12T19:35:09,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
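The pid=134/135 entries above trace a FlushTableProcedure on the master fanning out a FlushRegionProcedure to this region server, matching the earlier "Client=jenkins//172.17.0.3 flush TestAcidGuarantees" request and the "Operation: FLUSH ... procId: 132 completed" acknowledgement. A minimal sketch of how such a flush is typically issued through the Admin API follows; the connection setup is assumed, and only the table name comes from the log.

    // Sketch of requesting the table flush that appears above as
    // FlushTableProcedure (pid=134) with its FlushRegionProcedure subprocedure.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Returns once the master reports the flush procedure done, which is
          // what the "Checking to see if procedure is done pid=134" polling reflects.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }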
2024-11-12T19:35:09,003 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-12T19:35:09,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:35:09,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:09,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:35:09,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:09,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:35:09,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:09,005 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#B#compaction#444 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:09,005 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/7eb3ae2d7d53440c82a7fafa08faf173 is 50, key is test_row_0/B:col10/1731440108738/Put/seqid=0 2024-11-12T19:35:09,008 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#A#compaction#445 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:09,008 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/3e0645cbffff4a0bb2ebb1bb99e5c786 is 50, key is test_row_0/A:col10/1731440108738/Put/seqid=0 2024-11-12T19:35:09,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/c703cd7bc14e497b8412a6d045440dc0 is 50, key is test_row_0/A:col10/1731440108755/Put/seqid=0 2024-11-12T19:35:09,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742346_1522 (size=13289) 2024-11-12T19:35:09,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742345_1521 (size=13289) 2024-11-12T19:35:09,063 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/3e0645cbffff4a0bb2ebb1bb99e5c786 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/3e0645cbffff4a0bb2ebb1bb99e5c786 2024-11-12T19:35:09,067 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/7eb3ae2d7d53440c82a7fafa08faf173 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/7eb3ae2d7d53440c82a7fafa08faf173 2024-11-12T19:35:09,074 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/A of ce8b2c342e0c55d57c9696ce6e06a527 into 3e0645cbffff4a0bb2ebb1bb99e5c786(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:35:09,075 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:09,075 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/A, priority=13, startTime=1731440108966; duration=0sec 2024-11-12T19:35:09,075 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:09,075 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:A 2024-11-12T19:35:09,075 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:09,076 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:09,076 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/C is initiating minor compaction (all files) 2024-11-12T19:35:09,076 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/C in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:09,076 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/f88baf8661db432cbdb82210a742efc0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/4a7f2fa56ba34d5192490d3365e16c91, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/6c4d4e583b0144b9acb84825270725b3] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=36.9 K 2024-11-12T19:35:09,077 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting f88baf8661db432cbdb82210a742efc0, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1731440106199 2024-11-12T19:35:09,079 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a7f2fa56ba34d5192490d3365e16c91, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1731440106843 2024-11-12T19:35:09,079 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/B of ce8b2c342e0c55d57c9696ce6e06a527 into 7eb3ae2d7d53440c82a7fafa08faf173(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
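The compaction entries above show ExploringCompactionPolicy selecting all three eligible store files (~36.9 K total) for a minor compaction of each store. The selection is governed by the store compaction settings; the sketch below only illustrates the commonly used keys with their usual upstream defaults and is not the configuration of this test run.

    // Store-file selection knobs behind the "Exploring compaction algorithm has
    // selected 3 files" entries above. Values shown are the usual upstream
    // defaults, not necessarily what this test configures.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionPolicyConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);        // need at least this many eligible files
        conf.setInt("hbase.hstore.compaction.max", 10);       // cap on files per compaction
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used when exploring permutations
        System.out.println("min=" + conf.getInt("hbase.hstore.compaction.min", 3)
            + " max=" + conf.getInt("hbase.hstore.compaction.max", 10)
            + " ratio=" + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
      }
    }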
2024-11-12T19:35:09,079 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:09,079 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/B, priority=13, startTime=1731440108973; duration=0sec 2024-11-12T19:35:09,079 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c4d4e583b0144b9acb84825270725b3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1731440108089 2024-11-12T19:35:09,079 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:09,079 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:B 2024-11-12T19:35:09,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742347_1523 (size=12301) 2024-11-12T19:35:09,092 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#C#compaction#447 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:09,093 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/578151087f7b443a8b59d7c481af3870 is 50, key is test_row_0/C:col10/1731440108738/Put/seqid=0 2024-11-12T19:35:09,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:35:09,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. as already flushing 2024-11-12T19:35:09,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742348_1524 (size=13289) 2024-11-12T19:35:09,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-12T19:35:09,145 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/578151087f7b443a8b59d7c481af3870 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/578151087f7b443a8b59d7c481af3870 2024-11-12T19:35:09,151 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/C of ce8b2c342e0c55d57c9696ce6e06a527 into 578151087f7b443a8b59d7c481af3870(size=13.0 K), total size for store is 13.0 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:35:09,151 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:09,152 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/C, priority=13, startTime=1731440108973; duration=0sec 2024-11-12T19:35:09,152 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:09,152 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:C 2024-11-12T19:35:09,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:09,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440169200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:09,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:09,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440169202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:09,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:09,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440169224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:09,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:09,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440169308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:09,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:09,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440169310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:09,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-12T19:35:09,482 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/c703cd7bc14e497b8412a6d045440dc0 2024-11-12T19:35:09,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:09,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440169515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:09,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/e5d84af08edd4d639ddd5be786ca44d9 is 50, key is test_row_0/B:col10/1731440108755/Put/seqid=0 2024-11-12T19:35:09,524 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:09,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440169522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:09,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742349_1525 (size=12301) 2024-11-12T19:35:09,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:09,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440169823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:09,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:09,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440169825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:09,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-12T19:35:09,956 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/e5d84af08edd4d639ddd5be786ca44d9 2024-11-12T19:35:09,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/53a2d5c6e0044ee6a91aa844e7a466f8 is 50, key is test_row_0/C:col10/1731440108755/Put/seqid=0 2024-11-12T19:35:09,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742350_1526 (size=12301) 2024-11-12T19:35:10,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:10,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56712 deadline: 1731440170239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:10,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:10,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56780 deadline: 1731440170333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:10,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:10,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:56736 deadline: 1731440170337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:10,409 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/53a2d5c6e0044ee6a91aa844e7a466f8 2024-11-12T19:35:10,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/c703cd7bc14e497b8412a6d045440dc0 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/c703cd7bc14e497b8412a6d045440dc0 2024-11-12T19:35:10,425 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/c703cd7bc14e497b8412a6d045440dc0, entries=150, sequenceid=416, filesize=12.0 K 2024-11-12T19:35:10,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/e5d84af08edd4d639ddd5be786ca44d9 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/e5d84af08edd4d639ddd5be786ca44d9 2024-11-12T19:35:10,435 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/e5d84af08edd4d639ddd5be786ca44d9, entries=150, sequenceid=416, filesize=12.0 K 2024-11-12T19:35:10,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/53a2d5c6e0044ee6a91aa844e7a466f8 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/53a2d5c6e0044ee6a91aa844e7a466f8 2024-11-12T19:35:10,443 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/53a2d5c6e0044ee6a91aa844e7a466f8, entries=150, sequenceid=416, filesize=12.0 K 2024-11-12T19:35:10,447 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=181.14 KB/185490 for ce8b2c342e0c55d57c9696ce6e06a527 in 1444ms, sequenceid=416, compaction requested=false 2024-11-12T19:35:10,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:10,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
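[Editor's sketch, not part of the log] The repeated RegionTooBusyException entries above show the region server rejecting writes because the region's memstore grew past its blocking limit while the flush (pid=135) was still running. The snippet below is a minimal sketch of how that blocking limit is conventionally derived from configuration; the 512.0 K limit in this run is presumably a deliberately small value configured by the test, and the property names and defaults below are the standard HBase ones, not values read from this log.

```java
// Sketch only: how the blocking memstore size behind RegionTooBusyException
// is typically computed from standard HBase settings (assumed, not from this run).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region flush threshold; stock default is 128 MB.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Multiplier above the flush size at which writes start being rejected; default 4.
        long blockMultiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        // Once the region's memstore exceeds roughly flushSize * blockMultiplier,
        // puts are refused with RegionTooBusyException until a flush catches up.
        System.out.println("blocking memstore size ~= " + (flushSize * blockMultiplier) + " bytes");
    }
}
```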
2024-11-12T19:35:10,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-12T19:35:10,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-12T19:35:10,449 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-12T19:35:10,449 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5980 sec 2024-11-12T19:35:10,450 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.6090 sec 2024-11-12T19:35:10,865 DEBUG [Thread-1965 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6dee2855 to 127.0.0.1:60358 2024-11-12T19:35:10,865 DEBUG [Thread-1965 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:10,866 DEBUG [Thread-1961 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10bda459 to 127.0.0.1:60358 2024-11-12T19:35:10,866 DEBUG [Thread-1961 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:10,866 DEBUG [Thread-1959 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x402e5def to 127.0.0.1:60358 2024-11-12T19:35:10,866 DEBUG [Thread-1959 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:10,871 DEBUG [Thread-1967 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x54e8a98a to 127.0.0.1:60358 2024-11-12T19:35:10,871 DEBUG [Thread-1967 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:10,872 DEBUG [Thread-1963 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0657e1bf to 127.0.0.1:60358 2024-11-12T19:35:10,872 DEBUG [Thread-1963 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:10,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-12T19:35:10,950 INFO [Thread-1958 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-12T19:35:11,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:35:11,345 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-11-12T19:35:11,345 DEBUG [Thread-1952 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d125972 to 127.0.0.1:60358 2024-11-12T19:35:11,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:35:11,345 DEBUG [Thread-1952 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:11,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:11,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:35:11,346 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-11-12T19:35:11,346 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:35:11,346 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:11,349 DEBUG [Thread-1956 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17b55f2f to 127.0.0.1:60358 2024-11-12T19:35:11,349 DEBUG [Thread-1956 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:11,350 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/419df89e5485497b90fa8075e58b20b8 is 50, key is test_row_0/A:col10/1731440109153/Put/seqid=0 2024-11-12T19:35:11,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742351_1527 (size=12301) 2024-11-12T19:35:11,755 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/419df89e5485497b90fa8075e58b20b8 2024-11-12T19:35:11,760 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/4c7e1bb7e352444bac0551d08c33b24f is 50, key is test_row_0/B:col10/1731440109153/Put/seqid=0 2024-11-12T19:35:11,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742352_1528 (size=12301) 2024-11-12T19:35:12,164 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/4c7e1bb7e352444bac0551d08c33b24f 2024-11-12T19:35:12,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/54a763cd2d4f420390b2a984fbb9aca9 is 50, key is test_row_0/C:col10/1731440109153/Put/seqid=0 2024-11-12T19:35:12,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742353_1529 (size=12301) 2024-11-12T19:35:12,257 DEBUG [Thread-1950 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x315a23ef to 127.0.0.1:60358 2024-11-12T19:35:12,257 DEBUG [Thread-1950 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:12,574 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/54a763cd2d4f420390b2a984fbb9aca9 2024-11-12T19:35:12,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/419df89e5485497b90fa8075e58b20b8 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/419df89e5485497b90fa8075e58b20b8 2024-11-12T19:35:12,582 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/419df89e5485497b90fa8075e58b20b8, entries=150, sequenceid=450, filesize=12.0 K 2024-11-12T19:35:12,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/4c7e1bb7e352444bac0551d08c33b24f as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/4c7e1bb7e352444bac0551d08c33b24f 2024-11-12T19:35:12,586 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/4c7e1bb7e352444bac0551d08c33b24f, entries=150, sequenceid=450, filesize=12.0 K 2024-11-12T19:35:12,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/54a763cd2d4f420390b2a984fbb9aca9 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/54a763cd2d4f420390b2a984fbb9aca9 2024-11-12T19:35:12,589 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/54a763cd2d4f420390b2a984fbb9aca9, entries=150, sequenceid=450, filesize=12.0 K 2024-11-12T19:35:12,589 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=13.42 KB/13740 for ce8b2c342e0c55d57c9696ce6e06a527 in 1244ms, sequenceid=450, compaction requested=true 2024-11-12T19:35:12,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:12,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:35:12,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:12,590 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:12,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:B, priority=-2147483648, current under compaction store 
size is 2 2024-11-12T19:35:12,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:12,590 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:12,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ce8b2c342e0c55d57c9696ce6e06a527:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:35:12,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:12,590 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:12,590 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:12,590 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/A is initiating minor compaction (all files) 2024-11-12T19:35:12,590 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/B is initiating minor compaction (all files) 2024-11-12T19:35:12,590 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/A in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:12,590 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/B in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
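[Editor's sketch, not part of the log] The "Exploring compaction algorithm has selected 3 files of size 37891 ... with 1 in ratio" entries above refer to the size-ratio test that ExploringCompactionPolicy applies to a candidate set of store files. The sketch below illustrates that check under the usual assumptions (ratio from hbase.hstore.compaction.ratio, default 1.2); the helper name and the individual file sizes are illustrative, chosen only so they add up to the 37891 bytes reported in the log.

```java
// Sketch of the "in ratio" test: no single file may be larger than the combined
// size of the other candidates times the compaction ratio. Names/sizes are assumptions.
import java.util.List;

public class CompactionRatioSketch {
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            // A file that dwarfs the rest makes the candidate set poorly balanced.
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the three flush files selected above (~13.0 K + 12.0 K + 12.0 K = 37891 bytes).
        System.out.println(filesInRatio(List.of(13289L, 12301L, 12301L), 1.2)); // true
    }
}
```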
2024-11-12T19:35:12,590 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/7eb3ae2d7d53440c82a7fafa08faf173, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/e5d84af08edd4d639ddd5be786ca44d9, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/4c7e1bb7e352444bac0551d08c33b24f] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=37.0 K 2024-11-12T19:35:12,590 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/3e0645cbffff4a0bb2ebb1bb99e5c786, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/c703cd7bc14e497b8412a6d045440dc0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/419df89e5485497b90fa8075e58b20b8] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=37.0 K 2024-11-12T19:35:12,591 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e0645cbffff4a0bb2ebb1bb99e5c786, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1731440108089 2024-11-12T19:35:12,591 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 7eb3ae2d7d53440c82a7fafa08faf173, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1731440108089 2024-11-12T19:35:12,591 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting c703cd7bc14e497b8412a6d045440dc0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1731440108747 2024-11-12T19:35:12,591 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting e5d84af08edd4d639ddd5be786ca44d9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1731440108747 2024-11-12T19:35:12,591 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 419df89e5485497b90fa8075e58b20b8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1731440109153 2024-11-12T19:35:12,591 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c7e1bb7e352444bac0551d08c33b24f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1731440109153 2024-11-12T19:35:12,596 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#B#compaction#453 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:12,596 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#A#compaction#454 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:12,596 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/3d2cf4b2bae641adb2e0a0828bde1fae is 50, key is test_row_0/B:col10/1731440109153/Put/seqid=0 2024-11-12T19:35:12,596 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/672ef1799d0b43d6b15332a8905a1e7a is 50, key is test_row_0/A:col10/1731440109153/Put/seqid=0 2024-11-12T19:35:12,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742354_1530 (size=13391) 2024-11-12T19:35:12,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742355_1531 (size=13391) 2024-11-12T19:35:12,606 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/672ef1799d0b43d6b15332a8905a1e7a as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/672ef1799d0b43d6b15332a8905a1e7a 2024-11-12T19:35:12,606 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/3d2cf4b2bae641adb2e0a0828bde1fae as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/3d2cf4b2bae641adb2e0a0828bde1fae 2024-11-12T19:35:12,609 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/A of ce8b2c342e0c55d57c9696ce6e06a527 into 672ef1799d0b43d6b15332a8905a1e7a(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:35:12,609 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/B of ce8b2c342e0c55d57c9696ce6e06a527 into 3d2cf4b2bae641adb2e0a0828bde1fae(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:35:12,609 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:12,609 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:12,609 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/B, priority=13, startTime=1731440112590; duration=0sec 2024-11-12T19:35:12,609 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/A, priority=13, startTime=1731440112589; duration=0sec 2024-11-12T19:35:12,609 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:12,609 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:A 2024-11-12T19:35:12,609 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:12,609 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:B 2024-11-12T19:35:12,609 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:12,610 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:12,610 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): ce8b2c342e0c55d57c9696ce6e06a527/C is initiating minor compaction (all files) 2024-11-12T19:35:12,610 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ce8b2c342e0c55d57c9696ce6e06a527/C in TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:35:12,610 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/578151087f7b443a8b59d7c481af3870, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/53a2d5c6e0044ee6a91aa844e7a466f8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/54a763cd2d4f420390b2a984fbb9aca9] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp, totalSize=37.0 K 2024-11-12T19:35:12,610 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 578151087f7b443a8b59d7c481af3870, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1731440108089 2024-11-12T19:35:12,610 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53a2d5c6e0044ee6a91aa844e7a466f8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1731440108747 2024-11-12T19:35:12,610 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 54a763cd2d4f420390b2a984fbb9aca9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1731440109153 2024-11-12T19:35:12,615 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ce8b2c342e0c55d57c9696ce6e06a527#C#compaction#455 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:12,615 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/547884d6626e4ff7859ab43c32b3936f is 50, key is test_row_0/C:col10/1731440109153/Put/seqid=0 2024-11-12T19:35:12,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742356_1532 (size=13391) 2024-11-12T19:35:13,022 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/547884d6626e4ff7859ab43c32b3936f as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/547884d6626e4ff7859ab43c32b3936f 2024-11-12T19:35:13,025 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ce8b2c342e0c55d57c9696ce6e06a527/C of ce8b2c342e0c55d57c9696ce6e06a527 into 547884d6626e4ff7859ab43c32b3936f(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:35:13,025 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:13,025 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527., storeName=ce8b2c342e0c55d57c9696ce6e06a527/C, priority=13, startTime=1731440112590; duration=0sec 2024-11-12T19:35:13,025 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:13,026 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ce8b2c342e0c55d57c9696ce6e06a527:C 2024-11-12T19:35:15,468 DEBUG [Thread-1954 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x134bfe32 to 127.0.0.1:60358 2024-11-12T19:35:15,468 DEBUG [Thread-1948 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x345fa4f7 to 127.0.0.1:60358 2024-11-12T19:35:15,468 DEBUG [Thread-1948 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:15,468 DEBUG [Thread-1954 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:15,468 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-12T19:35:15,469 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 44 2024-11-12T19:35:15,469 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 91 2024-11-12T19:35:15,469 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 96 2024-11-12T19:35:15,469 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 31 2024-11-12T19:35:15,469 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 95 2024-11-12T19:35:15,469 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-12T19:35:15,469 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-12T19:35:15,469 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1513 2024-11-12T19:35:15,469 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4538 rows 2024-11-12T19:35:15,469 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1497 2024-11-12T19:35:15,469 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4491 rows 2024-11-12T19:35:15,469 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1504 2024-11-12T19:35:15,469 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4512 rows 2024-11-12T19:35:15,469 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1512 2024-11-12T19:35:15,469 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4535 rows 2024-11-12T19:35:15,469 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1513 2024-11-12T19:35:15,469 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4539 rows 2024-11-12T19:35:15,469 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-12T19:35:15,469 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x048087da to 127.0.0.1:60358 2024-11-12T19:35:15,469 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
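[Editor's sketch, not part of the log] The summary above lists how many rows each writer wrote and how many rows each scanner scanned and verified. The sketch below shows the kind of per-row check such a verifier can perform; the invariant (every column of a row is written in one atomic mutation, so a reader must never observe mixed values within a row) is inferred from the test's purpose, and the class and method names are illustrative rather than the tool's actual code.

```java
// Sketch of an atomicity check over a scanned row: all returned cells for a row
// should carry the same value if the row was always mutated atomically.
import java.util.Arrays;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Result;

public class RowConsistencyCheck {
    static void verify(Result result) {
        byte[] expected = null;
        for (Cell cell : result.rawCells()) {
            byte[] value = CellUtil.cloneValue(cell);
            if (expected == null) {
                expected = value;
            } else if (!Arrays.equals(expected, value)) {
                // A mismatch would mean the reader saw a partially applied row mutation.
                throw new AssertionError("Atomicity violated for row "
                    + Arrays.toString(result.getRow()));
            }
        }
    }
}
```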
2024-11-12T19:35:15,472 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-12T19:35:15,473 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees 2024-11-12T19:35:15,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-12T19:35:15,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-12T19:35:15,476 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731440115475"}]},"ts":"1731440115475"} 2024-11-12T19:35:15,476 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-12T19:35:15,521 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-12T19:35:15,523 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-12T19:35:15,525 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ce8b2c342e0c55d57c9696ce6e06a527, UNASSIGN}] 2024-11-12T19:35:15,526 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=138, ppid=137, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ce8b2c342e0c55d57c9696ce6e06a527, UNASSIGN 2024-11-12T19:35:15,527 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=ce8b2c342e0c55d57c9696ce6e06a527, regionState=CLOSING, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:35:15,528 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46265 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=CLOSING, location=81d69e608036,33067,1731439956493, table=TestAcidGuarantees, region=ce8b2c342e0c55d57c9696ce6e06a527. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
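[Editor's sketch, not part of the log] The procedure chain above (DisableTableProcedure and its child close-table/transit-region procedures, pid=136 onward) is the master-side work behind an ordinary table disable. For orientation, a minimal client-side call that triggers this chain is sketched below; the connection setup is assumed and nothing here is taken from the test's own code.

```java
// Sketch: disabling the table from a client, which drives the master's
// DisableTableProcedure and the region-close subprocedures logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Blocks until the master finishes disabling the table,
            // i.e. until the procedure chain seen in this log completes.
            admin.disableTable(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```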
2024-11-12T19:35:15,529 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-12T19:35:15,529 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; CloseRegionProcedure ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493}] 2024-11-12T19:35:15,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-12T19:35:15,681 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:15,681 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] handler.UnassignRegionHandler(124): Close ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:35:15,681 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-12T19:35:15,682 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1681): Closing ce8b2c342e0c55d57c9696ce6e06a527, disabling compactions & flushes 2024-11-12T19:35:15,682 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:15,682 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:15,682 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. after waiting 0 ms 2024-11-12T19:35:15,682 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 
2024-11-12T19:35:15,682 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(2837): Flushing ce8b2c342e0c55d57c9696ce6e06a527 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-12T19:35:15,682 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=A 2024-11-12T19:35:15,682 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:15,682 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=B 2024-11-12T19:35:15,682 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:15,682 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ce8b2c342e0c55d57c9696ce6e06a527, store=C 2024-11-12T19:35:15,682 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:15,686 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/39e073fb4fb24d598e27784e85371058 is 50, key is test_row_1/A:col10/1731440115466/Put/seqid=0 2024-11-12T19:35:15,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742357_1533 (size=9857) 2024-11-12T19:35:15,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-12T19:35:16,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-12T19:35:16,092 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/39e073fb4fb24d598e27784e85371058 2024-11-12T19:35:16,101 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/dcd613528c744f54938c445626ddb625 is 50, key is test_row_1/B:col10/1731440115466/Put/seqid=0 2024-11-12T19:35:16,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742358_1534 (size=9857) 2024-11-12T19:35:16,154 ERROR [LeaseRenewer:jenkins.hfs.0@localhost:41367 {}] 
server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins.hfs.0@localhost:41367,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:16,506 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/dcd613528c744f54938c445626ddb625 2024-11-12T19:35:16,514 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/d3f21f54cd0a464ca1c7f74572ef577a is 50, key is test_row_1/C:col10/1731440115466/Put/seqid=0 2024-11-12T19:35:16,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742359_1535 (size=9857) 2024-11-12T19:35:16,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-12T19:35:16,920 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/d3f21f54cd0a464ca1c7f74572ef577a 2024-11-12T19:35:16,924 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/A/39e073fb4fb24d598e27784e85371058 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/39e073fb4fb24d598e27784e85371058 2024-11-12T19:35:16,927 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/39e073fb4fb24d598e27784e85371058, entries=100, sequenceid=460, filesize=9.6 K 2024-11-12T19:35:16,928 DEBUG 
[RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/B/dcd613528c744f54938c445626ddb625 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/dcd613528c744f54938c445626ddb625 2024-11-12T19:35:16,933 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/dcd613528c744f54938c445626ddb625, entries=100, sequenceid=460, filesize=9.6 K 2024-11-12T19:35:16,933 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/.tmp/C/d3f21f54cd0a464ca1c7f74572ef577a as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/d3f21f54cd0a464ca1c7f74572ef577a 2024-11-12T19:35:16,937 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/d3f21f54cd0a464ca1c7f74572ef577a, entries=100, sequenceid=460, filesize=9.6 K 2024-11-12T19:35:16,937 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for ce8b2c342e0c55d57c9696ce6e06a527 in 1255ms, sequenceid=460, compaction requested=false 2024-11-12T19:35:16,938 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/2f9e7526f4bc4c108c25946a86e46fb5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/0411a6051b5444e489e974ca0c378c35, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/35516ff644cf4bc1b1ee4fb45a9b5e0d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/8264026ce8a14545acde58e6bc2eca8e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/42adcf78ac5a4e189aaf156a8d13e418, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/e85962da8ec146d7a8f0f60b94014062, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/bab319cbcb0d433090f8100a337c87da, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/2a6358f20e0b40ab8d7a209c98425cd7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/3d0c7dd56c704b90a5bbe6d98d4292b1, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/e6a6683ea3cf44d2ba81dbe81f1fc49c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/6d8cfa29e40a4479960ff8d18df22c07, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/841fbbd287a8413ea5f7d5076a4a3fc7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/b7b199b2ff274764b383575d87804ecf, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/b2a1192be2794a83b7d6c072fac5f17b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/657081ffb3914ee3964423df517f54ba, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/d3b98170b7114d1a8df2332037bba8d0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/816e1bda02034b7799b2d631ba7c70bd, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/f332ea1d23b74b2d910a6586ccdc638a, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/107e6e2c919f476b8fec23c6405d487d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/e8d9b3ac42dc468e9db245adefd555d0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/0c72c4d174e341b4a7724a8fdad9f29e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/174a6946fe8d41178d8b108ff70b87db, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/057b20789289480da1ee9e269fdcedd7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/191cad47466a40b786d42da9fe71cc13, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/9b82603d071043d58f38c02d2a9ae72d, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/f67d2acbea914c9c86e216c85b448e2c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/ab872127a8614189a8d3cb19bf9abdc5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/09327af6a1e847538d88f73f5821a723, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/3e0645cbffff4a0bb2ebb1bb99e5c786, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/b580711c2192459e9a68df87ae25501f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/c703cd7bc14e497b8412a6d045440dc0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/419df89e5485497b90fa8075e58b20b8] to archive 2024-11-12T19:35:16,939 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-12T19:35:16,940 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/2f9e7526f4bc4c108c25946a86e46fb5 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/2f9e7526f4bc4c108c25946a86e46fb5 2024-11-12T19:35:16,941 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/0411a6051b5444e489e974ca0c378c35 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/0411a6051b5444e489e974ca0c378c35 2024-11-12T19:35:16,942 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/35516ff644cf4bc1b1ee4fb45a9b5e0d to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/35516ff644cf4bc1b1ee4fb45a9b5e0d 2024-11-12T19:35:16,944 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/8264026ce8a14545acde58e6bc2eca8e to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/8264026ce8a14545acde58e6bc2eca8e 2024-11-12T19:35:16,945 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/42adcf78ac5a4e189aaf156a8d13e418 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/42adcf78ac5a4e189aaf156a8d13e418 2024-11-12T19:35:16,946 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/e85962da8ec146d7a8f0f60b94014062 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/e85962da8ec146d7a8f0f60b94014062 2024-11-12T19:35:16,947 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/bab319cbcb0d433090f8100a337c87da to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/bab319cbcb0d433090f8100a337c87da 2024-11-12T19:35:16,948 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/2a6358f20e0b40ab8d7a209c98425cd7 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/2a6358f20e0b40ab8d7a209c98425cd7 2024-11-12T19:35:16,949 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/3d0c7dd56c704b90a5bbe6d98d4292b1 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/3d0c7dd56c704b90a5bbe6d98d4292b1 2024-11-12T19:35:16,949 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/e6a6683ea3cf44d2ba81dbe81f1fc49c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/e6a6683ea3cf44d2ba81dbe81f1fc49c 2024-11-12T19:35:16,950 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/6d8cfa29e40a4479960ff8d18df22c07 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/6d8cfa29e40a4479960ff8d18df22c07 2024-11-12T19:35:16,951 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/841fbbd287a8413ea5f7d5076a4a3fc7 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/841fbbd287a8413ea5f7d5076a4a3fc7 2024-11-12T19:35:16,952 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/b7b199b2ff274764b383575d87804ecf to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/b7b199b2ff274764b383575d87804ecf 2024-11-12T19:35:16,953 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/b2a1192be2794a83b7d6c072fac5f17b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/b2a1192be2794a83b7d6c072fac5f17b 2024-11-12T19:35:16,954 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/657081ffb3914ee3964423df517f54ba to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/657081ffb3914ee3964423df517f54ba 2024-11-12T19:35:16,956 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/d3b98170b7114d1a8df2332037bba8d0 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/d3b98170b7114d1a8df2332037bba8d0 2024-11-12T19:35:16,957 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/816e1bda02034b7799b2d631ba7c70bd to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/816e1bda02034b7799b2d631ba7c70bd 2024-11-12T19:35:16,957 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/f332ea1d23b74b2d910a6586ccdc638a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/f332ea1d23b74b2d910a6586ccdc638a 2024-11-12T19:35:16,958 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/107e6e2c919f476b8fec23c6405d487d to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/107e6e2c919f476b8fec23c6405d487d 2024-11-12T19:35:16,959 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/e8d9b3ac42dc468e9db245adefd555d0 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/e8d9b3ac42dc468e9db245adefd555d0 2024-11-12T19:35:16,960 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/0c72c4d174e341b4a7724a8fdad9f29e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/0c72c4d174e341b4a7724a8fdad9f29e 2024-11-12T19:35:16,961 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/174a6946fe8d41178d8b108ff70b87db to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/174a6946fe8d41178d8b108ff70b87db 2024-11-12T19:35:16,962 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/057b20789289480da1ee9e269fdcedd7 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/057b20789289480da1ee9e269fdcedd7 2024-11-12T19:35:16,963 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/191cad47466a40b786d42da9fe71cc13 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/191cad47466a40b786d42da9fe71cc13 2024-11-12T19:35:16,964 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/9b82603d071043d58f38c02d2a9ae72d to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/9b82603d071043d58f38c02d2a9ae72d 2024-11-12T19:35:16,965 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/f67d2acbea914c9c86e216c85b448e2c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/f67d2acbea914c9c86e216c85b448e2c 2024-11-12T19:35:16,966 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/ab872127a8614189a8d3cb19bf9abdc5 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/ab872127a8614189a8d3cb19bf9abdc5 2024-11-12T19:35:16,967 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/09327af6a1e847538d88f73f5821a723 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/09327af6a1e847538d88f73f5821a723 2024-11-12T19:35:16,968 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/3e0645cbffff4a0bb2ebb1bb99e5c786 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/3e0645cbffff4a0bb2ebb1bb99e5c786 2024-11-12T19:35:16,969 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/b580711c2192459e9a68df87ae25501f to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/b580711c2192459e9a68df87ae25501f 2024-11-12T19:35:16,970 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/c703cd7bc14e497b8412a6d045440dc0 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/c703cd7bc14e497b8412a6d045440dc0 2024-11-12T19:35:16,971 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/419df89e5485497b90fa8075e58b20b8 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/419df89e5485497b90fa8075e58b20b8 2024-11-12T19:35:16,972 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/4d3293e4c11a4c6bac5d622323d06356, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/24e02d144aaa45deb23e2848aeb19368, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/7f0f4993cdbd4d819b6e45765bf09474, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/9631362c835c42b6a2abe6a3279271ed, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/eaa3e25d260d40e69459348a3b1c9fd5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/d5e5fa61f25540f180eaf9ef6cde44ad, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/c22391afe1b545a9bff9d49c1e16a481, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/239345cc2d53416ba6dfa5cff5e3f4f6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/57e7f355f518488a821334341c2ea4db, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/48472fc7b5ce48c9aaa7878e999b4804, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/5e61332ff38b442ca627c86935f47655, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/54150eaf6e674cd0be8d651a87c172a2, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/d3b33fc1d3e0417dae7efe5d2b941a58, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/e8d086f35bab482ea41fdba3ac459398, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/7309e789ebe240728a963acceb68db51, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/973e9be1089e4fc7aa564da0d08d6da4, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/bc701c157ede4f1b8798afdaedac9967, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/fec0fd41c66c48f7a3a142212be7dcf7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/38fd71ca15b44819b65b97d22482afd8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/a0be45237e3e40fa87b77b0632ed28bc, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/145f4a2025034a9084028f7e36f7d2fc, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/701ac1cb331a45c3838363f044ab30f5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/859af1ee7d104ea3ae101e8553b1a9f8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/d94a9a8caaa849dc8527d8b353f19ea3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/dedda3cb3db44baf9bc8f3f9db5e1ac9, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/da79ec24855045d1a46337cc9c174a9b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/57a4d7120f27406c8085f7e096a72d9c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/98fd1df263934a3488e72668d32230cb, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/7eb3ae2d7d53440c82a7fafa08faf173, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/346e9c33848e42cba9ff050ed7102454, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/e5d84af08edd4d639ddd5be786ca44d9, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/4c7e1bb7e352444bac0551d08c33b24f] to archive 2024-11-12T19:35:16,973 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-12T19:35:16,975 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/4d3293e4c11a4c6bac5d622323d06356 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/4d3293e4c11a4c6bac5d622323d06356 2024-11-12T19:35:16,976 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/24e02d144aaa45deb23e2848aeb19368 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/24e02d144aaa45deb23e2848aeb19368 2024-11-12T19:35:16,978 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/7f0f4993cdbd4d819b6e45765bf09474 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/7f0f4993cdbd4d819b6e45765bf09474 2024-11-12T19:35:16,979 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/9631362c835c42b6a2abe6a3279271ed to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/9631362c835c42b6a2abe6a3279271ed 2024-11-12T19:35:16,980 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/eaa3e25d260d40e69459348a3b1c9fd5 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/eaa3e25d260d40e69459348a3b1c9fd5 2024-11-12T19:35:16,981 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/d5e5fa61f25540f180eaf9ef6cde44ad to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/d5e5fa61f25540f180eaf9ef6cde44ad 2024-11-12T19:35:16,982 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/c22391afe1b545a9bff9d49c1e16a481 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/c22391afe1b545a9bff9d49c1e16a481 2024-11-12T19:35:16,983 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/239345cc2d53416ba6dfa5cff5e3f4f6 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/239345cc2d53416ba6dfa5cff5e3f4f6 2024-11-12T19:35:16,987 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/57e7f355f518488a821334341c2ea4db to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/57e7f355f518488a821334341c2ea4db 2024-11-12T19:35:16,988 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/48472fc7b5ce48c9aaa7878e999b4804 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/48472fc7b5ce48c9aaa7878e999b4804 2024-11-12T19:35:16,990 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/5e61332ff38b442ca627c86935f47655 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/5e61332ff38b442ca627c86935f47655 2024-11-12T19:35:16,991 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/54150eaf6e674cd0be8d651a87c172a2 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/54150eaf6e674cd0be8d651a87c172a2 2024-11-12T19:35:16,992 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/d3b33fc1d3e0417dae7efe5d2b941a58 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/d3b33fc1d3e0417dae7efe5d2b941a58 2024-11-12T19:35:16,993 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/e8d086f35bab482ea41fdba3ac459398 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/e8d086f35bab482ea41fdba3ac459398 2024-11-12T19:35:16,994 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/7309e789ebe240728a963acceb68db51 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/7309e789ebe240728a963acceb68db51 2024-11-12T19:35:16,995 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/973e9be1089e4fc7aa564da0d08d6da4 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/973e9be1089e4fc7aa564da0d08d6da4 2024-11-12T19:35:16,996 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/bc701c157ede4f1b8798afdaedac9967 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/bc701c157ede4f1b8798afdaedac9967 2024-11-12T19:35:16,997 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/fec0fd41c66c48f7a3a142212be7dcf7 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/fec0fd41c66c48f7a3a142212be7dcf7 2024-11-12T19:35:16,998 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/38fd71ca15b44819b65b97d22482afd8 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/38fd71ca15b44819b65b97d22482afd8 2024-11-12T19:35:16,999 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/a0be45237e3e40fa87b77b0632ed28bc to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/a0be45237e3e40fa87b77b0632ed28bc 2024-11-12T19:35:17,000 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/145f4a2025034a9084028f7e36f7d2fc to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/145f4a2025034a9084028f7e36f7d2fc 2024-11-12T19:35:17,001 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/701ac1cb331a45c3838363f044ab30f5 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/701ac1cb331a45c3838363f044ab30f5 2024-11-12T19:35:17,002 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/859af1ee7d104ea3ae101e8553b1a9f8 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/859af1ee7d104ea3ae101e8553b1a9f8 2024-11-12T19:35:17,003 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/d94a9a8caaa849dc8527d8b353f19ea3 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/d94a9a8caaa849dc8527d8b353f19ea3 2024-11-12T19:35:17,004 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/dedda3cb3db44baf9bc8f3f9db5e1ac9 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/dedda3cb3db44baf9bc8f3f9db5e1ac9 2024-11-12T19:35:17,005 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/da79ec24855045d1a46337cc9c174a9b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/da79ec24855045d1a46337cc9c174a9b 2024-11-12T19:35:17,006 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/57a4d7120f27406c8085f7e096a72d9c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/57a4d7120f27406c8085f7e096a72d9c 2024-11-12T19:35:17,007 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/98fd1df263934a3488e72668d32230cb to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/98fd1df263934a3488e72668d32230cb 2024-11-12T19:35:17,008 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/7eb3ae2d7d53440c82a7fafa08faf173 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/7eb3ae2d7d53440c82a7fafa08faf173 2024-11-12T19:35:17,009 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/346e9c33848e42cba9ff050ed7102454 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/346e9c33848e42cba9ff050ed7102454 2024-11-12T19:35:17,010 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/e5d84af08edd4d639ddd5be786ca44d9 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/e5d84af08edd4d639ddd5be786ca44d9 2024-11-12T19:35:17,011 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/4c7e1bb7e352444bac0551d08c33b24f to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/4c7e1bb7e352444bac0551d08c33b24f 2024-11-12T19:35:17,012 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/9bdebbe0d32c47cd90eae15a299796ff, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/7ad22d13bf1b4b1e895a1cac199acfd9, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/0b263d6c84a741e98c07b91b252dde25, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/b35f9a4a798c435b93b933b067ddafa4, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/e37d8ece8ae240dd9b030d87897b6103, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/8fcee0be92d744b7b49af3aa3caea190, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/bec9d75d31394c3d8736cb513b1861e5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/bf3c340cce3341dbb66d59f224106370, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/e8991ba68bf04e2b89227cb6ed24cb41, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/2d4cc320e3cb4e0a87ebc40300da2f89, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/88b18ad7551a4796b51c5de120a7d075, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/a1a58596c20143ceaead4e6e8f55f305, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/3dd9db348ade4ba6ad584b8fced3067e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/2e4241b550c14fd98593a5921948c206, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/fef1b1ce473247f882bde422644223a6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/0b0170d01d0e4f5caa6e4307b7240fee, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/48e86c68637345dcbd84788fce036a87, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/252a9449f0354a29a7c73d0469827e5d, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/3df723f3306c49ebbe52a35aed6ce5e5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/67d445a4581241f1bb3305ac02cf2496, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/e171967c8ab948b48acf30e5ee4fd287, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/b4a86712fe7f4165bdb1926577672249, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/8c1e31a5e72a40fba03913c540eb7229, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/a9cf60d4e9c9454ea635290273da7617, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/31b24cfb42974b3c89d8a216986517e7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/f88baf8661db432cbdb82210a742efc0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/0008c1c407f5421898c26ce3ecbe17e0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/4a7f2fa56ba34d5192490d3365e16c91, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/578151087f7b443a8b59d7c481af3870, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/6c4d4e583b0144b9acb84825270725b3, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/53a2d5c6e0044ee6a91aa844e7a466f8, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/54a763cd2d4f420390b2a984fbb9aca9] to archive 2024-11-12T19:35:17,013 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-12T19:35:17,015 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/9bdebbe0d32c47cd90eae15a299796ff to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/9bdebbe0d32c47cd90eae15a299796ff 2024-11-12T19:35:17,016 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/7ad22d13bf1b4b1e895a1cac199acfd9 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/7ad22d13bf1b4b1e895a1cac199acfd9 2024-11-12T19:35:17,017 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/0b263d6c84a741e98c07b91b252dde25 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/0b263d6c84a741e98c07b91b252dde25 2024-11-12T19:35:17,018 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/b35f9a4a798c435b93b933b067ddafa4 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/b35f9a4a798c435b93b933b067ddafa4 2024-11-12T19:35:17,020 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/e37d8ece8ae240dd9b030d87897b6103 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/e37d8ece8ae240dd9b030d87897b6103 2024-11-12T19:35:17,021 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/8fcee0be92d744b7b49af3aa3caea190 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/8fcee0be92d744b7b49af3aa3caea190 2024-11-12T19:35:17,022 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/bec9d75d31394c3d8736cb513b1861e5 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/bec9d75d31394c3d8736cb513b1861e5 2024-11-12T19:35:17,022 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/bf3c340cce3341dbb66d59f224106370 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/bf3c340cce3341dbb66d59f224106370 2024-11-12T19:35:17,023 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/e8991ba68bf04e2b89227cb6ed24cb41 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/e8991ba68bf04e2b89227cb6ed24cb41 2024-11-12T19:35:17,024 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/2d4cc320e3cb4e0a87ebc40300da2f89 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/2d4cc320e3cb4e0a87ebc40300da2f89 2024-11-12T19:35:17,025 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/88b18ad7551a4796b51c5de120a7d075 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/88b18ad7551a4796b51c5de120a7d075 2024-11-12T19:35:17,026 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/a1a58596c20143ceaead4e6e8f55f305 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/a1a58596c20143ceaead4e6e8f55f305 2024-11-12T19:35:17,026 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/3dd9db348ade4ba6ad584b8fced3067e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/3dd9db348ade4ba6ad584b8fced3067e 2024-11-12T19:35:17,027 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/2e4241b550c14fd98593a5921948c206 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/2e4241b550c14fd98593a5921948c206 2024-11-12T19:35:17,028 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/fef1b1ce473247f882bde422644223a6 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/fef1b1ce473247f882bde422644223a6 2024-11-12T19:35:17,029 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/0b0170d01d0e4f5caa6e4307b7240fee to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/0b0170d01d0e4f5caa6e4307b7240fee 2024-11-12T19:35:17,029 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/48e86c68637345dcbd84788fce036a87 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/48e86c68637345dcbd84788fce036a87 2024-11-12T19:35:17,030 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/252a9449f0354a29a7c73d0469827e5d to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/252a9449f0354a29a7c73d0469827e5d 2024-11-12T19:35:17,031 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/3df723f3306c49ebbe52a35aed6ce5e5 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/3df723f3306c49ebbe52a35aed6ce5e5 2024-11-12T19:35:17,032 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/67d445a4581241f1bb3305ac02cf2496 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/67d445a4581241f1bb3305ac02cf2496 2024-11-12T19:35:17,033 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/e171967c8ab948b48acf30e5ee4fd287 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/e171967c8ab948b48acf30e5ee4fd287 2024-11-12T19:35:17,033 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/b4a86712fe7f4165bdb1926577672249 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/b4a86712fe7f4165bdb1926577672249 2024-11-12T19:35:17,034 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/8c1e31a5e72a40fba03913c540eb7229 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/8c1e31a5e72a40fba03913c540eb7229 2024-11-12T19:35:17,035 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/a9cf60d4e9c9454ea635290273da7617 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/a9cf60d4e9c9454ea635290273da7617 2024-11-12T19:35:17,036 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/31b24cfb42974b3c89d8a216986517e7 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/31b24cfb42974b3c89d8a216986517e7 2024-11-12T19:35:17,036 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/f88baf8661db432cbdb82210a742efc0 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/f88baf8661db432cbdb82210a742efc0 2024-11-12T19:35:17,037 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/0008c1c407f5421898c26ce3ecbe17e0 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/0008c1c407f5421898c26ce3ecbe17e0 2024-11-12T19:35:17,038 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/4a7f2fa56ba34d5192490d3365e16c91 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/4a7f2fa56ba34d5192490d3365e16c91 2024-11-12T19:35:17,038 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/578151087f7b443a8b59d7c481af3870 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/578151087f7b443a8b59d7c481af3870 2024-11-12T19:35:17,039 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/6c4d4e583b0144b9acb84825270725b3 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/6c4d4e583b0144b9acb84825270725b3 2024-11-12T19:35:17,040 DEBUG [StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/53a2d5c6e0044ee6a91aa844e7a466f8 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/53a2d5c6e0044ee6a91aa844e7a466f8 2024-11-12T19:35:17,041 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/54a763cd2d4f420390b2a984fbb9aca9 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/54a763cd2d4f420390b2a984fbb9aca9 2024-11-12T19:35:17,044 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/recovered.edits/463.seqid, newMaxSeqId=463, maxSeqId=1 2024-11-12T19:35:17,044 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527. 2024-11-12T19:35:17,044 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1635): Region close journal for ce8b2c342e0c55d57c9696ce6e06a527: 2024-11-12T19:35:17,045 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] handler.UnassignRegionHandler(170): Closed ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:35:17,046 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=ce8b2c342e0c55d57c9696ce6e06a527, regionState=CLOSED 2024-11-12T19:35:17,048 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-12T19:35:17,048 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; CloseRegionProcedure ce8b2c342e0c55d57c9696ce6e06a527, server=81d69e608036,33067,1731439956493 in 1.5180 sec 2024-11-12T19:35:17,049 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=138, resume processing ppid=137 2024-11-12T19:35:17,049 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, ppid=137, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ce8b2c342e0c55d57c9696ce6e06a527, UNASSIGN in 1.5230 sec 2024-11-12T19:35:17,051 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-12T19:35:17,051 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5270 sec 2024-11-12T19:35:17,052 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731440117052"}]},"ts":"1731440117052"} 2024-11-12T19:35:17,052 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-12T19:35:17,095 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-12T19:35:17,099 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6230 sec 2024-11-12T19:35:17,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): 
Checking to see if procedure is done pid=136 2024-11-12T19:35:17,581 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-12T19:35:17,582 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-11-12T19:35:17,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:35:17,585 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=140, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:35:17,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-12T19:35:17,587 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=140, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:35:17,591 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:35:17,595 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A, FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B, FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C, FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/recovered.edits] 2024-11-12T19:35:17,597 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/39e073fb4fb24d598e27784e85371058 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/39e073fb4fb24d598e27784e85371058 2024-11-12T19:35:17,598 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/672ef1799d0b43d6b15332a8905a1e7a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/A/672ef1799d0b43d6b15332a8905a1e7a 2024-11-12T19:35:17,599 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/3d2cf4b2bae641adb2e0a0828bde1fae to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/3d2cf4b2bae641adb2e0a0828bde1fae 2024-11-12T19:35:17,600 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/dcd613528c744f54938c445626ddb625 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/B/dcd613528c744f54938c445626ddb625 2024-11-12T19:35:17,602 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/547884d6626e4ff7859ab43c32b3936f to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/547884d6626e4ff7859ab43c32b3936f 2024-11-12T19:35:17,603 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/d3f21f54cd0a464ca1c7f74572ef577a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/C/d3f21f54cd0a464ca1c7f74572ef577a 2024-11-12T19:35:17,605 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/recovered.edits/463.seqid to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527/recovered.edits/463.seqid 2024-11-12T19:35:17,605 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/ce8b2c342e0c55d57c9696ce6e06a527 2024-11-12T19:35:17,605 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-12T19:35:17,607 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=140, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:35:17,608 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-12T19:35:17,610 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-12T19:35:17,611 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=140, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:35:17,611 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
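Every HFileArchiver entry above follows the same pattern: a store file under data/<namespace>/<table>/<region>/<family>/ is moved to the identical relative path under archive/. A minimal sketch of that path mapping (a plain-string illustration of the layout visible in these lines, not HBase's internal archiver API; the root directory is copied from the log):

    public class ArchivePathDemo {
        // HBase root directory of this test run, copied from the log above.
        static final String ROOT =
            "hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8";

        // data/<ns>/<table>/<region>/<cf>/<file> -> archive/data/<ns>/<table>/<region>/<cf>/<file>
        static String toArchivePath(String storeFile) {
            int idx = storeFile.indexOf("/data/", ROOT.length() - 1);
            return storeFile.substring(0, idx) + "/archive" + storeFile.substring(idx);
        }

        public static void main(String[] args) {
            String src = ROOT + "/data/default/TestAcidGuarantees/"
                + "ce8b2c342e0c55d57c9696ce6e06a527/C/3dd9db348ade4ba6ad584b8fced3067e";
            // Prints the same archive location that the log records for this file.
            System.out.println(toArchivePath(src));
        }
    }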
2024-11-12T19:35:17,612 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731440117611"}]},"ts":"9223372036854775807"} 2024-11-12T19:35:17,613 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-12T19:35:17,613 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => ce8b2c342e0c55d57c9696ce6e06a527, NAME => 'TestAcidGuarantees,,1731440088380.ce8b2c342e0c55d57c9696ce6e06a527.', STARTKEY => '', ENDKEY => ''}] 2024-11-12T19:35:17,613 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-12T19:35:17,614 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731440117613"}]},"ts":"9223372036854775807"} 2024-11-12T19:35:17,615 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-12T19:35:17,653 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=140, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:35:17,654 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 70 msec 2024-11-12T19:35:17,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-12T19:35:17,688 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-12T19:35:17,704 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=237 (was 235) - Thread LEAK? -, OpenFileDescriptor=449 (was 451), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1013 (was 1109), ProcessCount=11 (was 11), AvailableMemoryMB=873 (was 1349) 2024-11-12T19:35:17,713 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=237, OpenFileDescriptor=449, MaxFileDescriptor=1048576, SystemLoadAverage=1013, ProcessCount=11, AvailableMemoryMB=873 2024-11-12T19:35:17,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
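The DISABLE (procId 136) and DELETE (procId 140) operations completed above are driven by ordinary HBase Admin calls. A minimal client-side sketch, assuming the ZooKeeper endpoint 127.0.0.1:60358 that this test cluster advertises elsewhere in the log; this is not the test's own code, only the equivalent API usage:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableDemo {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // ZooKeeper quorum/port as used by this test run (values taken from the log).
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "60358");

            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // A table must be disabled before it can be deleted; both calls block until
                // the corresponding master procedure (DisableTableProcedure /
                // DeleteTableProcedure in the log) completes.
                if (admin.tableExists(table)) {
                    admin.disableTable(table);
                    admin.deleteTable(table);
                }
            }
        }
    }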
2024-11-12T19:35:17,714 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T19:35:17,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=141, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-12T19:35:17,716 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T19:35:17,716 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:17,716 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 141 2024-11-12T19:35:17,716 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T19:35:17,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-11-12T19:35:17,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742360_1536 (size=960) 2024-11-12T19:35:17,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-11-12T19:35:18,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-11-12T19:35:18,122 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8 2024-11-12T19:35:18,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742361_1537 (size=53) 2024-11-12T19:35:18,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-11-12T19:35:18,428 ERROR [LeaseRenewer:jenkins@localhost:41367 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins@localhost:41367,5,PEWorkerGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:18,526 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:35:18,526 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 9487c3b0150a5aa38a9544f87bbf2a6e, disabling compactions & flushes 2024-11-12T19:35:18,526 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:18,526 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:18,527 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
after waiting 0 ms 2024-11-12T19:35:18,527 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:18,527 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:18,527 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:18,527 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T19:35:18,528 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1731440118527"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731440118527"}]},"ts":"1731440118527"} 2024-11-12T19:35:18,529 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-12T19:35:18,529 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T19:35:18,529 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731440118529"}]},"ts":"1731440118529"} 2024-11-12T19:35:18,530 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-12T19:35:18,580 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9487c3b0150a5aa38a9544f87bbf2a6e, ASSIGN}] 2024-11-12T19:35:18,581 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9487c3b0150a5aa38a9544f87bbf2a6e, ASSIGN 2024-11-12T19:35:18,581 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=9487c3b0150a5aa38a9544f87bbf2a6e, ASSIGN; state=OFFLINE, location=81d69e608036,33067,1731439956493; forceNewPlan=false, retain=false 2024-11-12T19:35:18,732 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=9487c3b0150a5aa38a9544f87bbf2a6e, regionState=OPENING, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:35:18,734 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; OpenRegionProcedure 9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493}] 2024-11-12T19:35:18,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-11-12T19:35:18,887 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): 
New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:18,889 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:18,889 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(7285): Opening region: {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} 2024-11-12T19:35:18,889 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:18,890 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T19:35:18,890 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(7327): checking encryption for 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:18,890 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(7330): checking classloading for 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:18,891 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:18,892 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:35:18,893 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9487c3b0150a5aa38a9544f87bbf2a6e columnFamilyName A 2024-11-12T19:35:18,893 DEBUG [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:18,893 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] regionserver.HStore(327): Store=9487c3b0150a5aa38a9544f87bbf2a6e/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, 
compression=NONE 2024-11-12T19:35:18,893 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:18,894 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:35:18,894 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9487c3b0150a5aa38a9544f87bbf2a6e columnFamilyName B 2024-11-12T19:35:18,895 DEBUG [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:18,895 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] regionserver.HStore(327): Store=9487c3b0150a5aa38a9544f87bbf2a6e/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:35:18,895 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:18,896 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-12T19:35:18,896 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9487c3b0150a5aa38a9544f87bbf2a6e columnFamilyName C 2024-11-12T19:35:18,896 DEBUG [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:18,897 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] regionserver.HStore(327): Store=9487c3b0150a5aa38a9544f87bbf2a6e/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T19:35:18,897 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:18,898 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:18,898 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:18,900 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-12T19:35:18,901 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(1085): writing seq id for 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:18,904 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T19:35:18,904 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(1102): Opened 9487c3b0150a5aa38a9544f87bbf2a6e; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68628814, jitterRate=0.022649019956588745}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T19:35:18,905 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(1001): Region open journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:18,906 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., pid=143, masterSystemTime=1731440118887 2024-11-12T19:35:18,908 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:18,908 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
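The CreateTableProcedure above materializes the descriptor requested at 19:35:17,714: three column families A/B/C with one version each and the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC'. A hedged sketch of how such a descriptor is assembled with the HBase 2.x client API (connection setup and the test-specific flush size are omitted; this mirrors the logged descriptor and is not the test's source):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableDemo {
        // Builds a descriptor matching the one logged above; 'admin' is an open Admin handle.
        static void createTestTable(Admin admin) throws java.io.IOException {
            TableDescriptorBuilder builder =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    // Table-level metadata seen in the logged descriptor.
                    .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
            for (String family : new String[] {"A", "B", "C"}) {
                builder.setColumnFamily(
                    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                        .setMaxVersions(1)     // VERSIONS => '1'
                        .setBlocksize(65536)   // BLOCKSIZE => '65536'
                        .build());
            }
            TableDescriptor desc = builder.build();
            admin.createTable(desc);  // drives the CreateTableProcedure recorded above
        }
    }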
2024-11-12T19:35:18,908 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=9487c3b0150a5aa38a9544f87bbf2a6e, regionState=OPEN, openSeqNum=2, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:35:18,911 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-12T19:35:18,911 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; OpenRegionProcedure 9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 in 175 msec 2024-11-12T19:35:18,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=142, resume processing ppid=141 2024-11-12T19:35:18,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9487c3b0150a5aa38a9544f87bbf2a6e, ASSIGN in 332 msec 2024-11-12T19:35:18,913 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T19:35:18,913 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731440118913"}]},"ts":"1731440118913"} 2024-11-12T19:35:18,915 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-12T19:35:19,092 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T19:35:19,093 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.3780 sec 2024-11-12T19:35:19,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-11-12T19:35:19,821 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 141 completed 2024-11-12T19:35:19,823 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a1fe6e4 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58e7ba75 2024-11-12T19:35:19,892 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68ba132a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:35:19,894 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:35:19,896 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53386, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:35:19,898 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T19:35:19,899 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41718, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-12T19:35:19,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-12T19:35:19,903 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.3 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T19:35:19,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-12T19:35:19,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742362_1538 (size=996) 2024-11-12T19:35:20,317 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-12T19:35:20,317 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-12T19:35:20,319 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-12T19:35:20,321 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9487c3b0150a5aa38a9544f87bbf2a6e, REOPEN/MOVE}] 2024-11-12T19:35:20,322 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9487c3b0150a5aa38a9544f87bbf2a6e, REOPEN/MOVE 2024-11-12T19:35:20,322 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=9487c3b0150a5aa38a9544f87bbf2a6e, regionState=CLOSING, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:35:20,323 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-12T19:35:20,323 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; CloseRegionProcedure 9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493}] 2024-11-12T19:35:20,474 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:20,474 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(124): Close 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:20,474 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-12T19:35:20,474 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1681): Closing 9487c3b0150a5aa38a9544f87bbf2a6e, disabling compactions & flushes 2024-11-12T19:35:20,474 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:20,474 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:20,475 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. after waiting 1 ms 2024-11-12T19:35:20,475 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:20,477 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-12T19:35:20,478 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:20,478 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1635): Region close journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:20,478 WARN [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionServer(3786): Not adding moved region record: 9487c3b0150a5aa38a9544f87bbf2a6e to self. 2024-11-12T19:35:20,479 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(170): Closed 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:20,480 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=9487c3b0150a5aa38a9544f87bbf2a6e, regionState=CLOSED 2024-11-12T19:35:20,482 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-12T19:35:20,482 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; CloseRegionProcedure 9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 in 157 msec 2024-11-12T19:35:20,482 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=9487c3b0150a5aa38a9544f87bbf2a6e, REOPEN/MOVE; state=CLOSED, location=81d69e608036,33067,1731439956493; forceNewPlan=false, retain=true 2024-11-12T19:35:20,633 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=9487c3b0150a5aa38a9544f87bbf2a6e, regionState=OPENING, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:35:20,634 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=146, state=RUNNABLE; OpenRegionProcedure 9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493}] 2024-11-12T19:35:20,785 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:20,788 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
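The CompactionConfiguration lines printed as each store opens (above for the first open, and again below during the reopen) show the effective tuning: 3-10 files per compaction, ratio 1.2, off-peak ratio 5.0, and a 604800000 ms major-compaction period with 0.5 jitter. Where a test needs different values, these are the standard Configuration keys; only keys that clearly correspond to the logged values are shown, as a sketch rather than this test's actual configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningDemo {
        // Standard keys behind the values printed by CompactionConfiguration above;
        // the values set here are the ones logged for this run.
        static Configuration compactionDefaults() {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.hstore.compaction.min", 3);          // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);         // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);   // ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
            conf.setLong("hbase.hregion.majorcompaction", 604800000L);    // major period, ms
            conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);  // major jitter
            return conf;
        }
    }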
2024-11-12T19:35:20,788 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(7285): Opening region: {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''}
2024-11-12T19:35:20,788 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 9487c3b0150a5aa38a9544f87bbf2a6e
2024-11-12T19:35:20,788 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-12T19:35:20,788 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(7327): checking encryption for 9487c3b0150a5aa38a9544f87bbf2a6e
2024-11-12T19:35:20,789 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(7330): checking classloading for 9487c3b0150a5aa38a9544f87bbf2a6e
2024-11-12T19:35:20,790 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 9487c3b0150a5aa38a9544f87bbf2a6e
2024-11-12T19:35:20,790 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10
2024-11-12T19:35:20,790 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9487c3b0150a5aa38a9544f87bbf2a6e columnFamilyName A
2024-11-12T19:35:20,791 DEBUG [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:20,792 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] regionserver.HStore(327): Store=9487c3b0150a5aa38a9544f87bbf2a6e/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-12T19:35:20,792 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 9487c3b0150a5aa38a9544f87bbf2a6e
2024-11-12T19:35:20,793 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10
2024-11-12T19:35:20,793 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9487c3b0150a5aa38a9544f87bbf2a6e columnFamilyName B
2024-11-12T19:35:20,793 DEBUG [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:20,794 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] regionserver.HStore(327): Store=9487c3b0150a5aa38a9544f87bbf2a6e/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-12T19:35:20,794 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 9487c3b0150a5aa38a9544f87bbf2a6e
2024-11-12T19:35:20,795 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10
2024-11-12T19:35:20,795 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9487c3b0150a5aa38a9544f87bbf2a6e columnFamilyName C
2024-11-12T19:35:20,795 DEBUG [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:20,796 INFO [StoreOpener-9487c3b0150a5aa38a9544f87bbf2a6e-1 {}] regionserver.HStore(327): Store=9487c3b0150a5aa38a9544f87bbf2a6e/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-12T19:35:20,796 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.
2024-11-12T19:35:20,796 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e
2024-11-12T19:35:20,797 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e
2024-11-12T19:35:20,799 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-11-12T19:35:20,800 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(1085): writing seq id for 9487c3b0150a5aa38a9544f87bbf2a6e
2024-11-12T19:35:20,801 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(1102): Opened 9487c3b0150a5aa38a9544f87bbf2a6e; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72553160, jitterRate=0.08112633228302002}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-11-12T19:35:20,802 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(1001): Region open journal for 9487c3b0150a5aa38a9544f87bbf2a6e:
2024-11-12T19:35:20,803 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., pid=148, masterSystemTime=1731440120785
2024-11-12T19:35:20,804 DEBUG [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.
2024-11-12T19:35:20,804 INFO [RS_OPEN_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.
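Two records in the region-open sequence above are worth pausing on: the FlushLargeStoresPolicy line reports that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the TestAcidGuarantees descriptor, so the policy falls back to the region memstore flush size divided by the number of families (the 16.0 M figure, matching flushSizeLowerBound=16777216), and the CompactingMemStore lines show all three families using in-memory compaction with the BASIC policy. A minimal sketch of how both settings could be expressed on the table descriptor with the HBase 2.x Java client is below; the 32 MB lower bound, class name, and variable names are illustrative assumptions, not values taken from this test run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class TuneTestAcidGuarantees {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      TableDescriptor current = admin.getDescriptor(table);

      TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(current)
          // Explicit per-family flush lower bound; 32 MB is an illustrative value,
          // not something read out of the test configuration.
          .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                    String.valueOf(32L * 1024 * 1024))
          // Family A already runs a CompactingMemStore with the BASIC policy in the
          // log above; this is how that choice is expressed on the descriptor.
          .modifyColumnFamily(
              ColumnFamilyDescriptorBuilder
                  .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                  .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                  .build());

      // Altering the descriptor drives a ModifyTableProcedure and a region reopen,
      // which is what the pid=144/145 procedure records in this log show.
      admin.modifyTable(builder.build());
    }
  }
}
```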
2024-11-12T19:35:20,804 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=9487c3b0150a5aa38a9544f87bbf2a6e, regionState=OPEN, openSeqNum=5, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:35:20,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=148, resume processing ppid=146 2024-11-12T19:35:20,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=146, state=SUCCESS; OpenRegionProcedure 9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 in 171 msec 2024-11-12T19:35:20,807 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=145 2024-11-12T19:35:20,807 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=145, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9487c3b0150a5aa38a9544f87bbf2a6e, REOPEN/MOVE in 485 msec 2024-11-12T19:35:20,809 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-12T19:35:20,809 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 489 msec 2024-11-12T19:35:20,811 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 906 msec 2024-11-12T19:35:20,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-12T19:35:20,813 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x55544bc7 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3005670a 2024-11-12T19:35:20,889 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@81e0163, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:35:20,890 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1208728f to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@92e7af3 2024-11-12T19:35:20,897 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71c377ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:35:20,897 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d0a9e33 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17899883 2024-11-12T19:35:20,914 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d1de3c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:35:20,915 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x40e55f2a 
to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b739a35 2024-11-12T19:35:20,922 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9e22139, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:35:20,922 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x271e8143 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@20bb05a7 2024-11-12T19:35:20,930 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38481360, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:35:20,931 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1a5ecd59 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62750e61 2024-11-12T19:35:20,947 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c078737, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:35:20,948 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61da8c1c to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b968040 2024-11-12T19:35:20,962 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bf8843a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:35:20,962 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x560a8819 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@49019618 2024-11-12T19:35:20,972 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76670256, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:35:20,972 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3df30e37 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7887fec7 2024-11-12T19:35:20,980 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36bc3633, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:35:20,981 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x745bf218 to 127.0.0.1:60358 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@336d4b92 2024-11-12T19:35:20,988 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64b7ee62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T19:35:20,990 DEBUG [hconnection-0x3f436697-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:35:20,991 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:35:20,991 DEBUG [hconnection-0x5801f8f8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:35:20,991 DEBUG [hconnection-0x543f194-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:35:20,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=149, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees 2024-11-12T19:35:20,991 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53400, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:35:20,992 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53414, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:35:20,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-12T19:35:20,993 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53416, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:35:20,993 DEBUG [hconnection-0x1e0199a2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:35:20,994 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=149, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:35:20,994 DEBUG [hconnection-0x36dc7adf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:35:20,994 DEBUG [hconnection-0x78a52a9e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:35:20,994 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=149, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:35:20,994 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53426, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-12T19:35:20,995 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:35:20,995 DEBUG [hconnection-0x3fa1c719-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:35:20,995 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53446, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:35:20,995 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53442, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:35:20,996 DEBUG [hconnection-0x64ab6fcb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:35:20,997 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53452, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:35:20,997 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53450, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:35:20,997 DEBUG [hconnection-0x2412e9db-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:35:20,998 DEBUG [hconnection-0x4706ed16-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T19:35:20,999 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53454, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:35:21,000 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53468, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T19:35:21,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:21,002 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-12T19:35:21,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A 2024-11-12T19:35:21,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:21,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B 2024-11-12T19:35:21,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:21,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=C 2024-11-12T19:35:21,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:21,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore 
size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440181017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440181018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440181018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53426 deadline: 1731440181021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440181021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,031 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112d0b6d372533647c6b85aaf6f559d8027_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440121001/Put/seqid=0 2024-11-12T19:35:21,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742363_1539 (size=12154) 2024-11-12T19:35:21,035 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:21,038 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112d0b6d372533647c6b85aaf6f559d8027_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112d0b6d372533647c6b85aaf6f559d8027_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:21,039 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/9471ba77acc445b89696cf1e5c09a270, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:21,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/9471ba77acc445b89696cf1e5c09a270 is 175, key is test_row_0/A:col10/1731440121001/Put/seqid=0 2024-11-12T19:35:21,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742364_1540 (size=30955) 2024-11-12T19:35:21,054 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/9471ba77acc445b89696cf1e5c09a270 2024-11-12T19:35:21,075 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/6e9feda1b65a4615bd74500f4638d17b is 50, key is test_row_0/B:col10/1731440121001/Put/seqid=0 2024-11-12T19:35:21,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742365_1541 (size=12001) 2024-11-12T19:35:21,088 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/6e9feda1b65a4615bd74500f4638d17b 2024-11-12T19:35:21,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-12T19:35:21,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/27ec76de0c95468b93feb04d71d452e0 is 50, key is test_row_0/C:col10/1731440121001/Put/seqid=0 2024-11-12T19:35:21,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742366_1542 (size=12001) 2024-11-12T19:35:21,121 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/27ec76de0c95468b93feb04d71d452e0 2024-11-12T19:35:21,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440181122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440181122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440181123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440181124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/9471ba77acc445b89696cf1e5c09a270 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/9471ba77acc445b89696cf1e5c09a270 2024-11-12T19:35:21,127 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53426 deadline: 1731440181125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,129 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/9471ba77acc445b89696cf1e5c09a270, entries=150, sequenceid=16, filesize=30.2 K 2024-11-12T19:35:21,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/6e9feda1b65a4615bd74500f4638d17b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/6e9feda1b65a4615bd74500f4638d17b 2024-11-12T19:35:21,132 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/6e9feda1b65a4615bd74500f4638d17b, entries=150, sequenceid=16, filesize=11.7 K 2024-11-12T19:35:21,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/27ec76de0c95468b93feb04d71d452e0 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/27ec76de0c95468b93feb04d71d452e0 2024-11-12T19:35:21,138 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/27ec76de0c95468b93feb04d71d452e0, entries=150, sequenceid=16, filesize=11.7 K 2024-11-12T19:35:21,138 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 9487c3b0150a5aa38a9544f87bbf2a6e in 136ms, sequenceid=16, compaction requested=false 2024-11-12T19:35:21,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:21,147 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:21,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 
{}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-11-12T19:35:21,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:21,148 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-12T19:35:21,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A 2024-11-12T19:35:21,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:21,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B 2024-11-12T19:35:21,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:21,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=C 2024-11-12T19:35:21,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:21,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112012e6da16c1b48579aad003e7699c674_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440121020/Put/seqid=0 2024-11-12T19:35:21,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742367_1543 (size=12154) 2024-11-12T19:35:21,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-12T19:35:21,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:21,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:21,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440181331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440181331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53426 deadline: 1731440181334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440181332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,337 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440181334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440181435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53426 deadline: 1731440181437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440181440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440181440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440181440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:21,574 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112012e6da16c1b48579aad003e7699c674_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112012e6da16c1b48579aad003e7699c674_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:21,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/ec651701017f481d9e111f881469034a, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:21,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/ec651701017f481d9e111f881469034a is 175, key is test_row_0/A:col10/1731440121020/Put/seqid=0 2024-11-12T19:35:21,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742368_1544 (size=30955) 2024-11-12T19:35:21,585 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/ec651701017f481d9e111f881469034a 2024-11-12T19:35:21,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/d9e0c212781448f0aea8fe234b648dfb is 50, key is test_row_0/B:col10/1731440121020/Put/seqid=0 2024-11-12T19:35:21,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-12T19:35:21,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742369_1545 (size=12001) 2024-11-12T19:35:21,609 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/d9e0c212781448f0aea8fe234b648dfb 2024-11-12T19:35:21,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/3d5bcce152f1415c899f03de213eda46 is 50, key is test_row_0/C:col10/1731440121020/Put/seqid=0 2024-11-12T19:35:21,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742370_1546 (size=12001) 2024-11-12T19:35:21,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440181637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53426 deadline: 1731440181640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440181641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440181642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440181642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440181940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53426 deadline: 1731440181942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440181943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440181943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:21,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:21,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440181945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:22,024 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/3d5bcce152f1415c899f03de213eda46 2024-11-12T19:35:22,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/ec651701017f481d9e111f881469034a as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/ec651701017f481d9e111f881469034a 2024-11-12T19:35:22,030 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/ec651701017f481d9e111f881469034a, entries=150, sequenceid=41, filesize=30.2 K 2024-11-12T19:35:22,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/d9e0c212781448f0aea8fe234b648dfb as 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/d9e0c212781448f0aea8fe234b648dfb 2024-11-12T19:35:22,033 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/d9e0c212781448f0aea8fe234b648dfb, entries=150, sequenceid=41, filesize=11.7 K 2024-11-12T19:35:22,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/3d5bcce152f1415c899f03de213eda46 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/3d5bcce152f1415c899f03de213eda46 2024-11-12T19:35:22,036 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/3d5bcce152f1415c899f03de213eda46, entries=150, sequenceid=41, filesize=11.7 K 2024-11-12T19:35:22,036 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 9487c3b0150a5aa38a9544f87bbf2a6e in 888ms, sequenceid=41, compaction requested=false 2024-11-12T19:35:22,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2538): Flush status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:22,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:22,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=150 2024-11-12T19:35:22,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=150 2024-11-12T19:35:22,038 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-11-12T19:35:22,038 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0430 sec 2024-11-12T19:35:22,039 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees in 1.0470 sec 2024-11-12T19:35:22,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-12T19:35:22,098 INFO [Thread-2409 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 149 completed 2024-11-12T19:35:22,099 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:35:22,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=151, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees 2024-11-12T19:35:22,100 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=151, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:35:22,100 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=151, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:35:22,101 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:35:22,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-12T19:35:22,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-12T19:35:22,252 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:22,252 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-11-12T19:35:22,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:22,252 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-12T19:35:22,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A 2024-11-12T19:35:22,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:22,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B 2024-11-12T19:35:22,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:22,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=C 2024-11-12T19:35:22,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:22,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111212afcfcf12764d659258f7ab6088543e_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440121332/Put/seqid=0 2024-11-12T19:35:22,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742371_1547 (size=12154) 2024-11-12T19:35:22,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:22,264 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111212afcfcf12764d659258f7ab6088543e_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111212afcfcf12764d659258f7ab6088543e_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:22,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/c17b2372a43c401a9872b1847080368b, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:22,265 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/c17b2372a43c401a9872b1847080368b is 175, key is test_row_0/A:col10/1731440121332/Put/seqid=0 2024-11-12T19:35:22,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742372_1548 (size=30955) 2024-11-12T19:35:22,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-12T19:35:22,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:22,445 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:22,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:22,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440182462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:22,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:22,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440182464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:22,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:22,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440182467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:22,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:22,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440182467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:22,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:22,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53426 deadline: 1731440182467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:22,565 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-12T19:35:22,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:22,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440182568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:22,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:22,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440182568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:22,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:22,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440182572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:22,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:22,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53426 deadline: 1731440182573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:22,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:22,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440182574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:22,670 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=52, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/c17b2372a43c401a9872b1847080368b 2024-11-12T19:35:22,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/faec61cdf01f47cfa6556329d070d782 is 50, key is test_row_0/B:col10/1731440121332/Put/seqid=0 2024-11-12T19:35:22,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742373_1549 (size=12001) 2024-11-12T19:35:22,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-12T19:35:22,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:22,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440182773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:22,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:22,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440182774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:22,778 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:22,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440182776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:22,778 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:22,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440182776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:22,778 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:22,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53426 deadline: 1731440182777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:23,079 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/faec61cdf01f47cfa6556329d070d782 2024-11-12T19:35:23,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:23,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440183077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:23,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:23,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440183077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:23,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:23,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440183079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:23,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:23,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440183080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:23,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:23,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53426 deadline: 1731440183083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:23,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/94e7f54bf41440f98778fc6ec0f1fa6e is 50, key is test_row_0/C:col10/1731440121332/Put/seqid=0 2024-11-12T19:35:23,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742374_1550 (size=12001) 2024-11-12T19:35:23,093 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/94e7f54bf41440f98778fc6ec0f1fa6e 2024-11-12T19:35:23,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/c17b2372a43c401a9872b1847080368b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/c17b2372a43c401a9872b1847080368b 2024-11-12T19:35:23,098 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/c17b2372a43c401a9872b1847080368b, entries=150, sequenceid=52, filesize=30.2 K 2024-11-12T19:35:23,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/faec61cdf01f47cfa6556329d070d782 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/faec61cdf01f47cfa6556329d070d782 2024-11-12T19:35:23,102 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/faec61cdf01f47cfa6556329d070d782, entries=150, sequenceid=52, filesize=11.7 K 2024-11-12T19:35:23,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/94e7f54bf41440f98778fc6ec0f1fa6e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/94e7f54bf41440f98778fc6ec0f1fa6e 2024-11-12T19:35:23,106 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/94e7f54bf41440f98778fc6ec0f1fa6e, entries=150, sequenceid=52, filesize=11.7 K 2024-11-12T19:35:23,111 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for 9487c3b0150a5aa38a9544f87bbf2a6e in 858ms, sequenceid=52, compaction requested=true 2024-11-12T19:35:23,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2538): Flush status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:23,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:23,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=152 2024-11-12T19:35:23,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=152 2024-11-12T19:35:23,113 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-11-12T19:35:23,113 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0110 sec 2024-11-12T19:35:23,114 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees in 1.0140 sec 2024-11-12T19:35:23,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-12T19:35:23,204 INFO [Thread-2409 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 151 completed 2024-11-12T19:35:23,206 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:35:23,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees 2024-11-12T19:35:23,207 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:35:23,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-12T19:35:23,207 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:35:23,208 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:35:23,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-12T19:35:23,359 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:23,359 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-12T19:35:23,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:23,359 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-12T19:35:23,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A 2024-11-12T19:35:23,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:23,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B 2024-11-12T19:35:23,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:23,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=C 2024-11-12T19:35:23,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:23,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112ee94b16cefc544d28c2119e562250e58_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440122465/Put/seqid=0 2024-11-12T19:35:23,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742375_1551 (size=12154) 2024-11-12T19:35:23,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-12T19:35:23,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:23,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:23,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:23,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440183586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:23,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:23,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440183588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:23,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:23,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440183588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:23,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:23,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440183589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:23,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:23,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53426 deadline: 1731440183589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:23,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:23,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440183689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:23,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:23,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440183692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:23,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:23,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440183692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:23,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:23,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440183692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:23,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:23,771 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112ee94b16cefc544d28c2119e562250e58_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112ee94b16cefc544d28c2119e562250e58_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:23,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/892824744e8d440a9cc8d2c1c06e6305, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:23,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/892824744e8d440a9cc8d2c1c06e6305 is 175, key is test_row_0/A:col10/1731440122465/Put/seqid=0 2024-11-12T19:35:23,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742376_1552 (size=30955) 2024-11-12T19:35:23,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-12T19:35:23,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:23,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440183892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:23,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:23,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440183895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:23,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:23,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440183895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:23,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:23,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440183895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:24,175 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/892824744e8d440a9cc8d2c1c06e6305 2024-11-12T19:35:24,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/af88a9399e304b838574fbc92b6b9c31 is 50, key is test_row_0/B:col10/1731440122465/Put/seqid=0 2024-11-12T19:35:24,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742377_1553 (size=12001) 2024-11-12T19:35:24,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:24,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440184197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:24,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:24,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440184198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:24,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:24,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440184198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:24,202 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:24,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440184199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:24,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-12T19:35:24,584 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/af88a9399e304b838574fbc92b6b9c31 2024-11-12T19:35:24,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/1289fd5d27ef4737a94936383ee04a57 is 50, key is test_row_0/C:col10/1731440122465/Put/seqid=0 2024-11-12T19:35:24,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:24,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53426 deadline: 1731440184592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:24,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742378_1554 (size=12001) 2024-11-12T19:35:24,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:24,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440184702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:24,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:24,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440184702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:24,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:24,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440184703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:24,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:24,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440184704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:24,996 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/1289fd5d27ef4737a94936383ee04a57 2024-11-12T19:35:24,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/892824744e8d440a9cc8d2c1c06e6305 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/892824744e8d440a9cc8d2c1c06e6305 2024-11-12T19:35:25,002 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/892824744e8d440a9cc8d2c1c06e6305, entries=150, sequenceid=79, filesize=30.2 K 2024-11-12T19:35:25,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/af88a9399e304b838574fbc92b6b9c31 as 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/af88a9399e304b838574fbc92b6b9c31 2024-11-12T19:35:25,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,006 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/af88a9399e304b838574fbc92b6b9c31, entries=150, sequenceid=79, filesize=11.7 K 2024-11-12T19:35:25,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/1289fd5d27ef4737a94936383ee04a57 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/1289fd5d27ef4737a94936383ee04a57 2024-11-12T19:35:25,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,010 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/1289fd5d27ef4737a94936383ee04a57, entries=150, sequenceid=79, filesize=11.7 K 2024-11-12T19:35:25,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,010 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 9487c3b0150a5aa38a9544f87bbf2a6e in 1651ms, sequenceid=79, compaction requested=true 2024-11-12T19:35:25,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:25,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:25,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-11-12T19:35:25,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-11-12T19:35:25,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,012 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-11-12T19:35:25,012 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8040 sec 2024-11-12T19:35:25,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
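Editor's note on the exceptions above: the repeated org.apache.hadoop.hbase.RegionTooBusyException warnings earlier in this log come from HRegion.checkResources rejecting Mutate calls while the region's memstore is above its blocking limit (logged as "Over memstore limit=512.0 K"); the flush that has just completed (pid=154 under pid=153) is what brings the region back under that limit so writes can resume. As a rough illustrative sketch only, the blocking limit is conventionally the product of the standard settings hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the 128 KB flush size and 4x multiplier below are assumptions chosen solely because they reproduce the 512 K figure, and are not taken from this test's actual configuration (which is not visible in this excerpt).

/**
 * Minimal sketch (not from the test run logged here): how a region's memstore
 * blocking limit is conventionally derived in HBase from two settings.
 * The numeric values are hypothetical, picked only to match the
 * "Over memstore limit=512.0 K" messages in this log.
 */
public class MemstoreBlockingLimitSketch {
    // Real HBase configuration keys; the values assigned below are assumptions.
    static final String FLUSH_SIZE_KEY = "hbase.hregion.memstore.flush.size";
    static final String BLOCK_MULTIPLIER_KEY = "hbase.hregion.memstore.block.multiplier";

    public static void main(String[] args) {
        long flushSizeBytes = 128L * 1024;  // assumed 128 KB flush size
        long blockMultiplier = 4L;          // assumed multiplier
        long blockingLimitBytes = flushSizeBytes * blockMultiplier;
        System.out.printf("%s=%d, %s=%d -> memstore blocking limit=%d bytes (%.1f K)%n",
                FLUSH_SIZE_KEY, flushSizeBytes,
                BLOCK_MULTIPLIER_KEY, blockMultiplier,
                blockingLimitBytes, blockingLimitBytes / 1024.0);
        // Writes arriving while the region's memstore is above this limit are
        // rejected with RegionTooBusyException until a flush shrinks the memstore.
    }
}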
2024-11-12T19:35:25,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,013 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees in 1.8060 sec 2024-11-12T19:35:25,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,017 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,073 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,079 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,084 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,089 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,098 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,103 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,108 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,115 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,120 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,125 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,130 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,137 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,142 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,147 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,155 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,160 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,166 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,172 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,186 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,193 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,200 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,213 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,224 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,231 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,237 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,248 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,254 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,261 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,266 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,275 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,280 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... repeated DEBUG entries "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" from RpcServer handlers 0 and 1 (port 33067), between 2024-11-12T19:35:25,280 and 19:35:25,311, elided ...]
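The burst of StoreFileTrackerFactory DEBUG entries above comes from HBase resolving a store file tracker implementation for each store it touches; DefaultStoreFileTracker is the stock choice shown in every entry. As a rough, hypothetical illustration only (the hbase.store.file-tracker.impl property name and the DEFAULT value are assumptions about the HBase build that produced this log, not something the log itself states), selecting the tracker via client/server configuration might look like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileTrackerConfigSketch {
  public static void main(String[] args) {
    // Sketch: pick the store file tracker implementation via configuration.
    // "DEFAULT" is assumed to resolve to DefaultStoreFileTracker, matching the
    // "instantiating StoreFileTracker impl ...DefaultStoreFileTracker" entries above.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.store.file-tracker.impl", "DEFAULT");
    System.out.println("store file tracker = " + conf.get("hbase.store.file-tracker.impl"));
  }
}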
2024-11-12T19:35:25,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153
2024-11-12T19:35:25,311 INFO [Thread-2409 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 153 completed
2024-11-12T19:35:25,314 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees
2024-11-12T19:35:25,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees
2024-11-12T19:35:25,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155
2024-11-12T19:35:25,316 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-12T19:35:25,316 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-12T19:35:25,316 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
[... interleaved DEBUG entries "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" from RpcServer handler 1 (port 33067), between 19:35:25,311 and 19:35:25,319, elided ...]
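The FLUSH entries above trace a client-driven flush of TestAcidGuarantees: the master stores a FlushTableProcedure (pid=155), fans out a FlushRegionProcedure subprocedure (pid=156), and the client polls "Checking to see if procedure is done" until it completes. A minimal client-side sketch of such a flush, assuming a reachable cluster and the standard hbase-client API (not taken from this log), could be:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    // Sketch: trigger a table flush like the one logged above; on this build the
    // master runs it as a FlushTableProcedure and the call returns once it is done.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}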
[... repeated DEBUG entries "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" from RpcServer handlers 0, 1 and 2 (port 33067), between 2024-11-12T19:35:25,319 and 19:35:25,417, elided ...]
2024-11-12T19:35:25,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155
[... repeated DEBUG entries "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" from RpcServer handlers 0 and 1 (port 33067), between 19:35:25,417 and 19:35:25,427, elided ...]
2024-11-12T19:35:25,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,467 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:25,468 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,468 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-12T19:35:25,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:25,468 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-12T19:35:25,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A 2024-11-12T19:35:25,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:25,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B 2024-11-12T19:35:25,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:25,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=C 2024-11-12T19:35:25,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:25,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112df510198addc4081a350028328d57a44_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440123587/Put/seqid=0 2024-11-12T19:35:25,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742379_1555 (size=9714) 2024-11-12T19:35:25,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,478 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112df510198addc4081a350028328d57a44_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112df510198addc4081a350028328d57a44_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:25,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/f85c9f16669744a78f6839a3d9a7265b, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:25,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/f85c9f16669744a78f6839a3d9a7265b is 175, key is test_row_0/A:col10/1731440123587/Put/seqid=0 2024-11-12T19:35:25,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742380_1556 (size=22361) 2024-11-12T19:35:25,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,484 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=88, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/f85c9f16669744a78f6839a3d9a7265b 2024-11-12T19:35:25,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/9bc037b94fd146dd8fdf4e3508c3d055 is 50, key is test_row_0/B:col10/1731440123587/Put/seqid=0 2024-11-12T19:35:25,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742381_1557 (size=9657) 2024-11-12T19:35:25,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,492 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/9bc037b94fd146dd8fdf4e3508c3d055 2024-11-12T19:35:25,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/4ad0681f47234a2bbc24f47b7e18914e is 50, key is test_row_0/C:col10/1731440123587/Put/seqid=0 2024-11-12T19:35:25,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742382_1558 (size=9657)
2024-11-12T19:35:25,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155
2024-11-12T19:35:25,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:25,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:25,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 9487c3b0150a5aa38a9544f87bbf2a6e
2024-11-12T19:35:25,716 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing
2024-11-12T19:35:25,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:35:25,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440185734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493
2024-11-12T19:35:25,742 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:35:25,742 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:35:25,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440185737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493
2024-11-12T19:35:25,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440185738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493
2024-11-12T19:35:25,742 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:35:25,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440185739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493
2024-11-12T19:35:25,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:35:25,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440185840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493
2024-11-12T19:35:25,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:25,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440185843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:25,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:25,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440185843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:25,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:25,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440185843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:25,905 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/4ad0681f47234a2bbc24f47b7e18914e 2024-11-12T19:35:25,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/f85c9f16669744a78f6839a3d9a7265b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/f85c9f16669744a78f6839a3d9a7265b 2024-11-12T19:35:25,911 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/f85c9f16669744a78f6839a3d9a7265b, entries=100, sequenceid=88, filesize=21.8 K 2024-11-12T19:35:25,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/9bc037b94fd146dd8fdf4e3508c3d055 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/9bc037b94fd146dd8fdf4e3508c3d055 2024-11-12T19:35:25,914 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/9bc037b94fd146dd8fdf4e3508c3d055, entries=100, sequenceid=88, filesize=9.4 K 2024-11-12T19:35:25,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/4ad0681f47234a2bbc24f47b7e18914e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/4ad0681f47234a2bbc24f47b7e18914e 2024-11-12T19:35:25,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-12T19:35:25,919 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/4ad0681f47234a2bbc24f47b7e18914e, entries=100, sequenceid=88, filesize=9.4 K 2024-11-12T19:35:25,920 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 9487c3b0150a5aa38a9544f87bbf2a6e in 452ms, sequenceid=88, compaction requested=true 2024-11-12T19:35:25,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2538): Flush status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:25,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:25,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-11-12T19:35:25,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=156 2024-11-12T19:35:25,922 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-11-12T19:35:25,922 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 605 msec 2024-11-12T19:35:25,923 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees in 608 msec 2024-11-12T19:35:26,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:26,047 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-12T19:35:26,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A 2024-11-12T19:35:26,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:26,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B 2024-11-12T19:35:26,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:26,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=C 2024-11-12T19:35:26,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:26,052 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411123449fd1add18455b9dbb0388720e7e36_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440126046/Put/seqid=0 2024-11-12T19:35:26,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742383_1559 (size=14594) 2024-11-12T19:35:26,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:26,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440186080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:26,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:26,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440186080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:26,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:26,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440186086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:26,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:26,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440186086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:26,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:26,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440186186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:26,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:26,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440186186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:26,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:26,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440186192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:26,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:26,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440186192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:26,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:26,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:26,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440186389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:26,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440186389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:26,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:26,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440186395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:26,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:26,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440186396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:26,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-12T19:35:26,419 INFO [Thread-2409 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-11-12T19:35:26,419 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:35:26,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-11-12T19:35:26,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-12T19:35:26,420 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:35:26,421 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:35:26,421 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:35:26,455 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:26,458 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411123449fd1add18455b9dbb0388720e7e36_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411123449fd1add18455b9dbb0388720e7e36_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:26,458 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/dbbacadaa1d94efe95dd2443cd053847, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:26,459 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/dbbacadaa1d94efe95dd2443cd053847 is 175, key is test_row_0/A:col10/1731440126046/Put/seqid=0 2024-11-12T19:35:26,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742384_1560 (size=39549) 2024-11-12T19:35:26,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-12T19:35:26,572 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:26,572 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-12T19:35:26,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:26,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:26,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:26,572 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:35:26,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:26,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:26,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:26,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53426 deadline: 1731440186594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:26,598 DEBUG [Thread-2407 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4130 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., hostname=81d69e608036,33067,1731439956493, seqNum=5, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:35:26,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:26,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440186693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:26,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:26,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440186694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:26,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:26,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440186698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:26,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:26,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440186698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:26,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-12T19:35:26,725 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:26,725 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-12T19:35:26,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:26,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:26,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:26,725 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:26,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:26,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:26,862 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=116, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/dbbacadaa1d94efe95dd2443cd053847 2024-11-12T19:35:26,867 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/47afa6a2c32647a5a2eb8d6ae84c5972 is 50, key is test_row_0/B:col10/1731440126046/Put/seqid=0 2024-11-12T19:35:26,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742385_1561 (size=12001) 2024-11-12T19:35:26,877 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:26,877 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-12T19:35:26,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:26,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:26,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:26,877 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:26,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:26,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:27,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-12T19:35:27,029 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:27,030 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-12T19:35:27,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:27,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:27,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:27,030 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:27,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:27,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:27,182 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:27,182 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-12T19:35:27,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:27,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:27,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:27,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:27,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:27,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:27,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:27,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440187199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:27,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:27,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440187200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:27,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:27,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440187204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:27,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:27,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440187205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:27,270 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/47afa6a2c32647a5a2eb8d6ae84c5972 2024-11-12T19:35:27,275 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/0ff43cf6b7764de687c53bce9406b356 is 50, key is test_row_0/C:col10/1731440126046/Put/seqid=0 2024-11-12T19:35:27,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742386_1562 (size=12001) 2024-11-12T19:35:27,334 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:27,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-12T19:35:27,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:27,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:27,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:27,340 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:27,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:27,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:27,491 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:27,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-12T19:35:27,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:27,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:27,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:27,492 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:27,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:27,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:27,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-12T19:35:27,644 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:27,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-12T19:35:27,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:27,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:27,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:27,644 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:27,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:27,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:27,679 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/0ff43cf6b7764de687c53bce9406b356 2024-11-12T19:35:27,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/dbbacadaa1d94efe95dd2443cd053847 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/dbbacadaa1d94efe95dd2443cd053847 2024-11-12T19:35:27,684 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/dbbacadaa1d94efe95dd2443cd053847, entries=200, sequenceid=116, filesize=38.6 K 2024-11-12T19:35:27,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/47afa6a2c32647a5a2eb8d6ae84c5972 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/47afa6a2c32647a5a2eb8d6ae84c5972 2024-11-12T19:35:27,687 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/47afa6a2c32647a5a2eb8d6ae84c5972, entries=150, 
sequenceid=116, filesize=11.7 K 2024-11-12T19:35:27,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/0ff43cf6b7764de687c53bce9406b356 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/0ff43cf6b7764de687c53bce9406b356 2024-11-12T19:35:27,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,691 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/0ff43cf6b7764de687c53bce9406b356, entries=150, sequenceid=116, filesize=11.7 K 2024-11-12T19:35:27,691 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 9487c3b0150a5aa38a9544f87bbf2a6e in 1645ms, sequenceid=116, compaction requested=true 2024-11-12T19:35:27,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:27,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:35:27,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:27,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:35:27,692 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-12T19:35:27,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:27,692 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-12T19:35:27,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:35:27,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:27,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,693 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 185730 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-12T19:35:27,693 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 69662 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-12T19:35:27,693 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 9487c3b0150a5aa38a9544f87bbf2a6e/B is initiating minor compaction (all files) 2024-11-12T19:35:27,693 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 9487c3b0150a5aa38a9544f87bbf2a6e/A is initiating minor compaction (all files) 2024-11-12T19:35:27,693 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9487c3b0150a5aa38a9544f87bbf2a6e/B in TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:27,693 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9487c3b0150a5aa38a9544f87bbf2a6e/A in TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:27,694 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/6e9feda1b65a4615bd74500f4638d17b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/d9e0c212781448f0aea8fe234b648dfb, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/faec61cdf01f47cfa6556329d070d782, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/af88a9399e304b838574fbc92b6b9c31, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/9bc037b94fd146dd8fdf4e3508c3d055, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/47afa6a2c32647a5a2eb8d6ae84c5972] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp, totalSize=68.0 K 2024-11-12T19:35:27,694 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/9471ba77acc445b89696cf1e5c09a270, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/ec651701017f481d9e111f881469034a, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/c17b2372a43c401a9872b1847080368b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/892824744e8d440a9cc8d2c1c06e6305, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/f85c9f16669744a78f6839a3d9a7265b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/dbbacadaa1d94efe95dd2443cd053847] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp, totalSize=181.4 K 2024-11-12T19:35:27,694 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=10 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:27,694 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/9471ba77acc445b89696cf1e5c09a270, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/ec651701017f481d9e111f881469034a, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/c17b2372a43c401a9872b1847080368b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/892824744e8d440a9cc8d2c1c06e6305, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/f85c9f16669744a78f6839a3d9a7265b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/dbbacadaa1d94efe95dd2443cd053847] 2024-11-12T19:35:27,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,694 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9471ba77acc445b89696cf1e5c09a270, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1731440121000 2024-11-12T19:35:27,694 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e9feda1b65a4615bd74500f4638d17b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1731440121000 2024-11-12T19:35:27,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,694 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting d9e0c212781448f0aea8fe234b648dfb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1731440121015 2024-11-12T19:35:27,694 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec651701017f481d9e111f881469034a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1731440121015 2024-11-12T19:35:27,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,695 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting c17b2372a43c401a9872b1847080368b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731440121328 2024-11-12T19:35:27,695 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting faec61cdf01f47cfa6556329d070d782, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731440121328 2024-11-12T19:35:27,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,695 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 892824744e8d440a9cc8d2c1c06e6305, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731440122460 2024-11-12T19:35:27,696 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting f85c9f16669744a78f6839a3d9a7265b, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1731440123587 2024-11-12T19:35:27,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,696 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting af88a9399e304b838574fbc92b6b9c31, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731440122460 2024-11-12T19:35:27,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,696 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bc037b94fd146dd8fdf4e3508c3d055, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1731440123587 2024-11-12T19:35:27,696 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting dbbacadaa1d94efe95dd2443cd053847, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1731440125729 
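The Compactor(224) entries above list, for each family, the six candidate store files with their sizes, sequence numbers and earliest put timestamps that the exploring policy then weighs against its size ratio before committing to a selection. As a rough illustration of that ratio check only, here is a self-contained Java sketch; the file sizes, the 1.2 ratio and the class name are invented for the example, and this is not HBase's actual ExploringCompactionPolicy implementation.

```java
// Illustrative simplification of ratio-based minor compaction selection:
// pick the cheapest contiguous run of at least minFiles files in which no
// single file is larger than ratio * (sum of the other files in the run).
import java.util.ArrayList;
import java.util.List;

public class RatioCompactionSketch {

    static List<Long> selectFiles(List<Long> fileSizes, int minFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        long bestTotal = Long.MAX_VALUE;
        for (int start = 0; start < fileSizes.size(); start++) {
            for (int end = start + minFiles; end <= fileSizes.size(); end++) {
                List<Long> candidate = fileSizes.subList(start, end);
                long total = candidate.stream().mapToLong(Long::longValue).sum();
                boolean withinRatio = candidate.stream()
                    .allMatch(size -> size <= ratio * (total - size));
                if (withinRatio && total < bestTotal) {
                    bestTotal = total;
                    best = new ArrayList<>(candidate);
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // Sizes loosely modeled on the B-family files above (~11.7 K each plus one ~9.4 K file).
        List<Long> sizes = List.of(11_983L, 11_983L, 11_983L, 11_983L, 9_626L, 11_983L);
        System.out.println("Selected: " + selectFiles(sizes, 3, 1.2));
    }
}
```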
2024-11-12T19:35:27,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,697 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 47afa6a2c32647a5a2eb8d6ae84c5972, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1731440125729 2024-11-12T19:35:27,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
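The pid=158 entries earlier show a remote flush procedure repeatedly failing with "Unable to complete flush" while the region reports "NOT flushing ... as already flushing", until the memstore flush above finally completes at sequenceid=116. A minimal sketch of how a client could generate that kind of overlapping flush pressure through the public Admin API is shown below; the table name is taken from the log, but the loop, pacing and configuration are assumptions made for the example, not part of the test.

```java
// Minimal sketch, assuming a reachable cluster configured via hbase-site.xml on
// the classpath. Admin.flush() asks the servers to flush the table's regions;
// issuing a new request while a flush is already running is what produces the
// "NOT flushing ... as already flushing" DEBUG lines seen above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushPressureSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            for (int i = 0; i < 3; i++) {
                admin.flush(table);      // repeated flush requests against the same table
                Thread.sleep(100);       // hypothetical pacing, not taken from the test
            }
        }
    }
}
```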
2024-11-12T19:35:27,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,714 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9487c3b0150a5aa38a9544f87bbf2a6e#B#compaction#477 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:27,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,714 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/7eef6f09ce6e418e8cb581a72aa001c7 is 50, key is test_row_0/B:col10/1731440126046/Put/seqid=0 2024-11-12T19:35:27,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,719 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,723 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:27,726 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241112bcaf0a85ab9d472e9e2421ecb37a8783_9487c3b0150a5aa38a9544f87bbf2a6e store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:27,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,730 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241112bcaf0a85ab9d472e9e2421ecb37a8783_9487c3b0150a5aa38a9544f87bbf2a6e, store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:27,730 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,730 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112bcaf0a85ab9d472e9e2421ecb37a8783_9487c3b0150a5aa38a9544f87bbf2a6e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:27,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742387_1563 (size=12207) 2024-11-12T19:35:27,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,735 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/7eef6f09ce6e418e8cb581a72aa001c7 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/7eef6f09ce6e418e8cb581a72aa001c7 2024-11-12T19:35:27,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,739 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 9487c3b0150a5aa38a9544f87bbf2a6e/B of 9487c3b0150a5aa38a9544f87bbf2a6e into 7eef6f09ce6e418e8cb581a72aa001c7(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
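The entry above records the B-family compaction finishing and being dequeued; the C family goes through the same selection and rewrite immediately below. For completeness, here is a hedged sketch of requesting a compaction and waiting for it through the public Admin API; the 500 ms polling interval and the reuse of the TestAcidGuarantees table name are choices made for the example, not something the log itself shows.

```java
// Sketch only: trigger a minor compaction and poll until the servers report
// that no compaction is running for the table. Uses only public Admin methods;
// the polling interval is an arbitrary choice for the example.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactAndWaitSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            admin.compact(table); // minor compaction request, like the ones queued above
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(500);
            }
        }
    }
}
```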
2024-11-12T19:35:27,739 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:27,739 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., storeName=9487c3b0150a5aa38a9544f87bbf2a6e/B, priority=10, startTime=1731440127692; duration=0sec 2024-11-12T19:35:27,739 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:27,739 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:B 2024-11-12T19:35:27,739 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-12T19:35:27,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,741 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 69662 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-12T19:35:27,741 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 9487c3b0150a5aa38a9544f87bbf2a6e/C is initiating minor compaction (all files) 2024-11-12T19:35:27,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,741 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9487c3b0150a5aa38a9544f87bbf2a6e/C in TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:27,741 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/27ec76de0c95468b93feb04d71d452e0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/3d5bcce152f1415c899f03de213eda46, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/94e7f54bf41440f98778fc6ec0f1fa6e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/1289fd5d27ef4737a94936383ee04a57, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/4ad0681f47234a2bbc24f47b7e18914e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/0ff43cf6b7764de687c53bce9406b356] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp, totalSize=68.0 K 2024-11-12T19:35:27,741 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 27ec76de0c95468b93feb04d71d452e0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1731440121000 2024-11-12T19:35:27,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,741 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d5bcce152f1415c899f03de213eda46, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1731440121015 2024-11-12T19:35:27,742 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 94e7f54bf41440f98778fc6ec0f1fa6e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731440121328 2024-11-12T19:35:27,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,742 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 1289fd5d27ef4737a94936383ee04a57, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731440122460 2024-11-12T19:35:27,742 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ad0681f47234a2bbc24f47b7e18914e, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1731440123587 2024-11-12T19:35:27,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,742 DEBUG 
[RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ff43cf6b7764de687c53bce9406b356, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1731440125729 2024-11-12T19:35:27,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,748 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742388_1564 (size=4469) 2024-11-12T19:35:27,753 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9487c3b0150a5aa38a9544f87bbf2a6e#A#compaction#478 average throughput is 0.81 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:27,754 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/07fbd840539149168f6dc04de7a42fbb is 175, key is test_row_0/A:col10/1731440126046/Put/seqid=0 2024-11-12T19:35:27,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,757 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9487c3b0150a5aa38a9544f87bbf2a6e#C#compaction#479 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second
2024-11-12T19:35:27,758 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/f50243a15ac74fba994ff3ed9da0c699 is 50, key is test_row_0/C:col10/1731440126046/Put/seqid=0
2024-11-12T19:35:27,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:27,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742389_1565 (size=31161)
2024-11-12T19:35:27,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742390_1566 (size=12207)
2024-11-12T19:35:27,771 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/f50243a15ac74fba994ff3ed9da0c699 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f50243a15ac74fba994ff3ed9da0c699
2024-11-12T19:35:27,775 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 9487c3b0150a5aa38a9544f87bbf2a6e/C of 9487c3b0150a5aa38a9544f87bbf2a6e into f50243a15ac74fba994ff3ed9da0c699(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-12T19:35:27,775 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9487c3b0150a5aa38a9544f87bbf2a6e:
2024-11-12T19:35:27,775 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., storeName=9487c3b0150a5aa38a9544f87bbf2a6e/C, priority=10, startTime=1731440127692; duration=0sec
2024-11-12T19:35:27,776 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-12T19:35:27,776 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:C
2024-11-12T19:35:27,796 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493
2024-11-12T19:35:27,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158
2024-11-12T19:35:27,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on
TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:27,796 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-12T19:35:27,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A 2024-11-12T19:35:27,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:27,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B 2024-11-12T19:35:27,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:27,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=C 2024-11-12T19:35:27,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:27,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111247a187e2d6fd47f0bd4ab043b3c39f36_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440126085/Put/seqid=0 2024-11-12T19:35:27,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742391_1567 (size=9714) 2024-11-12T19:35:27,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,808 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111247a187e2d6fd47f0bd4ab043b3c39f36_9487c3b0150a5aa38a9544f87bbf2a6e to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111247a187e2d6fd47f0bd4ab043b3c39f36_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:27,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/a5c43ec287844250bdfefa1b06557355, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:27,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/a5c43ec287844250bdfefa1b06557355 is 175, key is test_row_0/A:col10/1731440126085/Put/seqid=0 2024-11-12T19:35:27,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742392_1568 (size=22361) 2024-11-12T19:35:27,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:27,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,188 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/07fbd840539149168f6dc04de7a42fbb as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/07fbd840539149168f6dc04de7a42fbb 2024-11-12T19:35:28,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,214 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 9487c3b0150a5aa38a9544f87bbf2a6e/A of 9487c3b0150a5aa38a9544f87bbf2a6e into 07fbd840539149168f6dc04de7a42fbb(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:35:28,214 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:28,214 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., storeName=9487c3b0150a5aa38a9544f87bbf2a6e/A, priority=10, startTime=1731440127692; duration=0sec 2024-11-12T19:35:28,214 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:28,214 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:A 2024-11-12T19:35:28,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
as already flushing 2024-11-12T19:35:28,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:28,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,219 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=127, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/a5c43ec287844250bdfefa1b06557355 2024-11-12T19:35:28,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:28,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/7440a0c4733a4c908dd7df6667924941 is 50, key is test_row_0/B:col10/1731440126085/Put/seqid=0 2024-11-12T19:35:28,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:28,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440188256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:28,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:28,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:28,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440188259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:28,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440188257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:28,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:28,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440188261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:28,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742393_1569 (size=9657) 2024-11-12T19:35:28,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:28,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440188363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:28,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:28,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440188365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:28,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:28,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440188367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:28,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
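
The repeated RegionTooBusyException warnings in this stretch of the log all originate in HRegion.checkResources, which rejects incoming Mutate calls while the region's memstore is above its blocking limit (reported here as 512.0 K) until the in-flight flush drains it. The sketch below is only an illustration of how a caller might ride out such rejections with its own backoff; it is not part of this test, the table name, row, family and value are placeholders, and the HBase client also retries these failures internally on its own.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        table.put(put);   // may fail while the region is over its memstore limit
                        break;            // write accepted
                    } catch (IOException e) {
                        // A RegionTooBusyException (possibly wrapped by the client's own retry
                        // machinery) means the region is blocking writes until a flush completes.
                        if (attempt == 5) {
                            throw e;      // give up after a few tries
                        }
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;   // simple exponential backoff before retrying
                    }
                }
            }
        }
    }
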
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:28,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440188368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:28,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-12T19:35:28,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:28,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440188566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:28,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:28,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440188569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:28,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:28,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440188569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:28,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:28,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440188572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:28,693 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/7440a0c4733a4c908dd7df6667924941 2024-11-12T19:35:28,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/1b1d8b3cec1c468d8346d50c3d2ca1f6 is 50, key is test_row_0/C:col10/1731440126085/Put/seqid=0 2024-11-12T19:35:28,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742394_1570 (size=9657) 2024-11-12T19:35:28,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:28,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440188871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:28,872 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:28,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440188872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:28,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:28,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440188879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:28,881 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:28,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440188879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:29,110 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/1b1d8b3cec1c468d8346d50c3d2ca1f6 2024-11-12T19:35:29,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/a5c43ec287844250bdfefa1b06557355 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/a5c43ec287844250bdfefa1b06557355 2024-11-12T19:35:29,118 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/a5c43ec287844250bdfefa1b06557355, entries=100, sequenceid=127, filesize=21.8 K 2024-11-12T19:35:29,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/7440a0c4733a4c908dd7df6667924941 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/7440a0c4733a4c908dd7df6667924941 2024-11-12T19:35:29,122 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/7440a0c4733a4c908dd7df6667924941, entries=100, sequenceid=127, filesize=9.4 K 2024-11-12T19:35:29,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/1b1d8b3cec1c468d8346d50c3d2ca1f6 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/1b1d8b3cec1c468d8346d50c3d2ca1f6
2024-11-12T19:35:29,126 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/1b1d8b3cec1c468d8346d50c3d2ca1f6, entries=100, sequenceid=127, filesize=9.4 K
2024-11-12T19:35:29,126 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 9487c3b0150a5aa38a9544f87bbf2a6e in 1330ms, sequenceid=127, compaction requested=false
2024-11-12T19:35:29,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for 9487c3b0150a5aa38a9544f87bbf2a6e:
2024-11-12T19:35:29,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.
2024-11-12T19:35:29,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158
2024-11-12T19:35:29,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=158
2024-11-12T19:35:29,128 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157
2024-11-12T19:35:29,129 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7060 sec
2024-11-12T19:35:29,130 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 2.7100 sec
2024-11-12T19:35:29,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 9487c3b0150a5aa38a9544f87bbf2a6e
2024-11-12T19:35:29,375 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB
2024-11-12T19:35:29,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A
2024-11-12T19:35:29,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:35:29,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B
2024-11-12T19:35:29,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:35:29,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK
9487c3b0150a5aa38a9544f87bbf2a6e, store=C 2024-11-12T19:35:29,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:29,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:29,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440189388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:29,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:29,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440189388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:29,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:29,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:29,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440189390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:29,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440189389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:29,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111251b33b09aa8c4565852c3db84e3bf6be_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440128246/Put/seqid=0 2024-11-12T19:35:29,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742395_1571 (size=12304) 2024-11-12T19:35:29,430 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,448 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111251b33b09aa8c4565852c3db84e3bf6be_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111251b33b09aa8c4565852c3db84e3bf6be_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:29,450 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/6aee2d548687410baf624733dce76c52, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:29,450 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/6aee2d548687410baf624733dce76c52 is 175, key is test_row_0/A:col10/1731440128246/Put/seqid=0 2024-11-12T19:35:29,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43501 is added to blk_1073742396_1572 (size=31105) 2024-11-12T19:35:29,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:29,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440189493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:29,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:29,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440189493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:29,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:29,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440189494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:29,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:29,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440189494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:29,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:29,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440189695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:29,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
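
The 512.0 K figure in these rejections is the region's blocking memstore size: HBase blocks updates once a region's memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and lets them through again once flushing brings the size back down. The default flush size is 128 MB, so a 512 K limit implies the test lowered these settings drastically. The snippet below is a hypothetical illustration of values that would produce the same limit, not the actual configuration used by this test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches 128 KB (the default is 128 MB).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            // Block further updates once the memstore reaches 4x the flush size.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
            // 128 KB * 4 = 512 KB, matching the "Over memstore limit=512.0 K" rejections above.
            System.out.println("blocking memstore limit = " + (blockingLimit / 1024) + " K");
        }
    }

Under settings like these a handful of concurrent writers is enough to keep the region bouncing off the limit, which is what the stream of rejected callIds from connections 172.17.0.3:53400, 53442, 53446 and 53450 shows.
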
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:29,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440189696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:29,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:29,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440189696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:29,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:29,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440189697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:29,861 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=156, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/6aee2d548687410baf624733dce76c52 2024-11-12T19:35:29,883 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/f950bb139d73443eb488c2d241bb5520 is 50, key is test_row_0/B:col10/1731440128246/Put/seqid=0 2024-11-12T19:35:29,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742397_1573 (size=12151) 2024-11-12T19:35:29,907 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/f950bb139d73443eb488c2d241bb5520 2024-11-12T19:35:29,932 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/f758ff5326c74cfd850a852526ec450b is 50, key is test_row_0/C:col10/1731440128246/Put/seqid=0 2024-11-12T19:35:29,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742398_1574 (size=12151) 2024-11-12T19:35:29,967 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/f758ff5326c74cfd850a852526ec450b 2024-11-12T19:35:29,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/6aee2d548687410baf624733dce76c52 as 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/6aee2d548687410baf624733dce76c52 2024-11-12T19:35:29,977 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/6aee2d548687410baf624733dce76c52, entries=150, sequenceid=156, filesize=30.4 K 2024-11-12T19:35:29,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/f950bb139d73443eb488c2d241bb5520 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/f950bb139d73443eb488c2d241bb5520 2024-11-12T19:35:29,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,980 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/f950bb139d73443eb488c2d241bb5520, entries=150, sequenceid=156, filesize=11.9 K 2024-11-12T19:35:29,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/f758ff5326c74cfd850a852526ec450b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f758ff5326c74cfd850a852526ec450b 2024-11-12T19:35:29,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:29,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:30,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440189999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:30,003 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:30,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440190000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:30,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,008 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f758ff5326c74cfd850a852526ec450b, entries=150, sequenceid=156, filesize=11.9 K 2024-11-12T19:35:30,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,010 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 9487c3b0150a5aa38a9544f87bbf2a6e in 634ms, sequenceid=156, compaction requested=true 2024-11-12T19:35:30,010 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:30,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:35:30,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:30,010 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:30,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:B, priority=-2147483648, current under compaction store size is 2 
2024-11-12T19:35:30,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:30,010 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:30,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:35:30,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:30,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,014 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84627 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:30,014 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34015 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:30,014 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 9487c3b0150a5aa38a9544f87bbf2a6e/B is initiating minor compaction (all files) 2024-11-12T19:35:30,014 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 9487c3b0150a5aa38a9544f87bbf2a6e/A is initiating minor compaction (all files) 2024-11-12T19:35:30,015 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9487c3b0150a5aa38a9544f87bbf2a6e/A in TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:30,015 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/07fbd840539149168f6dc04de7a42fbb, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/a5c43ec287844250bdfefa1b06557355, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/6aee2d548687410baf624733dce76c52] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp, totalSize=82.6 K 2024-11-12T19:35:30,015 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:30,015 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/07fbd840539149168f6dc04de7a42fbb, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/a5c43ec287844250bdfefa1b06557355, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/6aee2d548687410baf624733dce76c52] 2024-11-12T19:35:30,015 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 07fbd840539149168f6dc04de7a42fbb, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1731440125729 2024-11-12T19:35:30,016 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9487c3b0150a5aa38a9544f87bbf2a6e/B in TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:30,017 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/7eef6f09ce6e418e8cb581a72aa001c7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/7440a0c4733a4c908dd7df6667924941, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/f950bb139d73443eb488c2d241bb5520] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp, totalSize=33.2 K 2024-11-12T19:35:30,019 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-12T19:35:30,019 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting a5c43ec287844250bdfefa1b06557355, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731440126085 2024-11-12T19:35:30,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A 2024-11-12T19:35:30,019 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7eef6f09ce6e418e8cb581a72aa001c7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1731440125729 2024-11-12T19:35:30,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:30,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B 2024-11-12T19:35:30,019 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 6aee2d548687410baf624733dce76c52, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731440128246 2024-11-12T19:35:30,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:30,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=C 2024-11-12T19:35:30,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:30,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:30,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,023 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7440a0c4733a4c908dd7df6667924941, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731440126085 2024-11-12T19:35:30,023 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
f950bb139d73443eb488c2d241bb5520, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731440128246 2024-11-12T19:35:30,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,039 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:30,040 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9487c3b0150a5aa38a9544f87bbf2a6e#B#compaction#487 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:30,041 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/fa9389f7041c4615a935e9b3df40e9b5 is 50, key is test_row_0/B:col10/1731440128246/Put/seqid=0 2024-11-12T19:35:30,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111276d572e39cf14347b18b405ffc13ad3f_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440129379/Put/seqid=0 2024-11-12T19:35:30,044 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241112dbf0ae4f7af545c28f2cce99d88ab2b4_9487c3b0150a5aa38a9544f87bbf2a6e store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:30,046 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241112dbf0ae4f7af545c28f2cce99d88ab2b4_9487c3b0150a5aa38a9544f87bbf2a6e, store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:30,046 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112dbf0ae4f7af545c28f2cce99d88ab2b4_9487c3b0150a5aa38a9544f87bbf2a6e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:30,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742399_1575 (size=12459) 2024-11-12T19:35:30,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742401_1577 (size=14794) 2024-11-12T19:35:30,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742400_1576 (size=4469) 2024-11-12T19:35:30,096 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,100 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9487c3b0150a5aa38a9544f87bbf2a6e#A#compaction#486 average throughput is 0.40 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:30,101 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/947004ae23044b70849162efb2f74aad is 175, key is test_row_0/A:col10/1731440128246/Put/seqid=0 2024-11-12T19:35:30,113 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111276d572e39cf14347b18b405ffc13ad3f_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111276d572e39cf14347b18b405ffc13ad3f_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:30,117 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/472174221a884593bb512ed834c8aa3b, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:30,118 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/472174221a884593bb512ed834c8aa3b is 175, key is test_row_0/A:col10/1731440129379/Put/seqid=0 2024-11-12T19:35:30,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:30,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440190120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:30,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742402_1578 (size=31413) 2024-11-12T19:35:30,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:30,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440190123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:30,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742403_1579 (size=39749) 2024-11-12T19:35:30,163 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=167, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/472174221a884593bb512ed834c8aa3b 2024-11-12T19:35:30,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/2da858a8fef34bb6bbe172d65dd07a1e is 50, key is test_row_0/B:col10/1731440129379/Put/seqid=0 2024-11-12T19:35:30,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742404_1580 (size=12151) 2024-11-12T19:35:30,225 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/2da858a8fef34bb6bbe172d65dd07a1e 2024-11-12T19:35:30,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:30,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440190228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:30,232 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:30,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440190232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:30,259 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/98fbd153d3554c6bbafa362ccd91b9c7 is 50, key is test_row_0/C:col10/1731440129379/Put/seqid=0 2024-11-12T19:35:30,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742405_1581 (size=12151) 2024-11-12T19:35:30,299 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/98fbd153d3554c6bbafa362ccd91b9c7 2024-11-12T19:35:30,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/472174221a884593bb512ed834c8aa3b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/472174221a884593bb512ed834c8aa3b 2024-11-12T19:35:30,339 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/472174221a884593bb512ed834c8aa3b, entries=200, sequenceid=167, filesize=38.8 K 2024-11-12T19:35:30,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/2da858a8fef34bb6bbe172d65dd07a1e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/2da858a8fef34bb6bbe172d65dd07a1e 2024-11-12T19:35:30,365 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/2da858a8fef34bb6bbe172d65dd07a1e, entries=150, sequenceid=167, filesize=11.9 K 2024-11-12T19:35:30,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/98fbd153d3554c6bbafa362ccd91b9c7 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/98fbd153d3554c6bbafa362ccd91b9c7 2024-11-12T19:35:30,391 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/98fbd153d3554c6bbafa362ccd91b9c7, entries=150, sequenceid=167, filesize=11.9 K 2024-11-12T19:35:30,397 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 9487c3b0150a5aa38a9544f87bbf2a6e in 378ms, sequenceid=167, compaction requested=true 2024-11-12T19:35:30,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:30,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:A, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:35:30,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-12T19:35:30,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:B, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:35:30,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-12T19:35:30,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:35:30,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-12T19:35:30,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:30,436 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-12T19:35:30,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A 2024-11-12T19:35:30,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:30,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B 2024-11-12T19:35:30,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:30,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=C 2024-11-12T19:35:30,436 DEBUG [MemStoreFlusher.0 
{}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:30,446 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111235db6c6e7e8d4bba9151a22a96c3066c_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440130108/Put/seqid=0 2024-11-12T19:35:30,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742406_1582 (size=14794) 2024-11-12T19:35:30,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:30,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440190463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:30,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:30,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440190469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:30,486 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/fa9389f7041c4615a935e9b3df40e9b5 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/fa9389f7041c4615a935e9b3df40e9b5 2024-11-12T19:35:30,491 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9487c3b0150a5aa38a9544f87bbf2a6e/B of 9487c3b0150a5aa38a9544f87bbf2a6e into fa9389f7041c4615a935e9b3df40e9b5(size=12.2 K), total size for store is 24.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:35:30,491 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:30,491 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., storeName=9487c3b0150a5aa38a9544f87bbf2a6e/B, priority=13, startTime=1731440130010; duration=0sec 2024-11-12T19:35:30,491 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-12T19:35:30,491 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:B 2024-11-12T19:35:30,491 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 3 compacting, 1 eligible, 16 blocking 2024-11-12T19:35:30,492 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-12T19:35:30,492 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 
2024-11-12T19:35:30,492 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. because compaction request was cancelled 2024-11-12T19:35:30,492 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:A 2024-11-12T19:35:30,493 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:35:30,497 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46166 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:35:30,497 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 9487c3b0150a5aa38a9544f87bbf2a6e/C is initiating minor compaction (all files) 2024-11-12T19:35:30,497 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9487c3b0150a5aa38a9544f87bbf2a6e/C in TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:30,497 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f50243a15ac74fba994ff3ed9da0c699, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/1b1d8b3cec1c468d8346d50c3d2ca1f6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f758ff5326c74cfd850a852526ec450b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/98fbd153d3554c6bbafa362ccd91b9c7] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp, totalSize=45.1 K 2024-11-12T19:35:30,498 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting f50243a15ac74fba994ff3ed9da0c699, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1731440125729 2024-11-12T19:35:30,499 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b1d8b3cec1c468d8346d50c3d2ca1f6, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731440126085 2024-11-12T19:35:30,499 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting f758ff5326c74cfd850a852526ec450b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731440128246 2024-11-12T19:35:30,500 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98fbd153d3554c6bbafa362ccd91b9c7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1731440129379 2024-11-12T19:35:30,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:30,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440190508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:30,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:30,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440190509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:30,517 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9487c3b0150a5aa38a9544f87bbf2a6e#C#compaction#492 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:30,517 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/f597621b57634d52b1933ed984b95d33 is 50, key is test_row_0/C:col10/1731440129379/Put/seqid=0 2024-11-12T19:35:30,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-12T19:35:30,525 INFO [Thread-2409 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-11-12T19:35:30,526 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:35:30,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-11-12T19:35:30,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-12T19:35:30,528 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:35:30,530 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:35:30,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:35:30,536 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/947004ae23044b70849162efb2f74aad as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/947004ae23044b70849162efb2f74aad 2024-11-12T19:35:30,542 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9487c3b0150a5aa38a9544f87bbf2a6e/A of 9487c3b0150a5aa38a9544f87bbf2a6e into 947004ae23044b70849162efb2f74aad(size=30.7 K), total size for store is 69.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:35:30,542 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:30,542 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., storeName=9487c3b0150a5aa38a9544f87bbf2a6e/A, priority=13, startTime=1731440130010; duration=0sec 2024-11-12T19:35:30,542 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-12T19:35:30,542 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:A 2024-11-12T19:35:30,542 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:B 2024-11-12T19:35:30,542 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 4 compacting, 0 eligible, 16 blocking 2024-11-12T19:35:30,542 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-12T19:35:30,543 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-12T19:35:30,543 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
because compaction request was cancelled 2024-11-12T19:35:30,543 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:C 2024-11-12T19:35:30,543 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-12T19:35:30,544 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-12T19:35:30,544 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-12T19:35:30,544 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. because compaction request was cancelled 2024-11-12T19:35:30,544 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:B 2024-11-12T19:35:30,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742407_1583 (size=12493) 2024-11-12T19:35:30,557 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/f597621b57634d52b1933ed984b95d33 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f597621b57634d52b1933ed984b95d33 2024-11-12T19:35:30,562 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9487c3b0150a5aa38a9544f87bbf2a6e/C of 9487c3b0150a5aa38a9544f87bbf2a6e into f597621b57634d52b1933ed984b95d33(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:35:30,562 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:30,562 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., storeName=9487c3b0150a5aa38a9544f87bbf2a6e/C, priority=12, startTime=1731440130397; duration=0sec 2024-11-12T19:35:30,562 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:30,562 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:C 2024-11-12T19:35:30,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:30,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440190570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:30,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:30,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440190579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:30,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:30,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53426 deadline: 1731440190607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:30,610 DEBUG [Thread-2407 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8143 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., hostname=81d69e608036,33067,1731439956493, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T19:35:30,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-12T19:35:30,683 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:30,684 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-12T19:35:30,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 
{event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:30,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:30,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:30,687 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:30,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
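Note on the repeated RegionTooBusyException rejections above: they come from HRegion.checkResources(), which blocks writes once a region's memstore passes its blocking threshold (logged here as "Over memstore limit=512.0 K"). The following is only a hedged sketch of how that threshold is conventionally derived from the standard HBase configuration keys; the unusually small 512 K value suggests the test harness lowers the flush size, which is an assumption not visible in this excerpt, and the arithmetic is illustrative rather than the server code itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Standard keys; the fallback values shown are the usual HBase defaults
    // (128 MB flush size, 4x blocking multiplier).
    long flushSize  = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    // Once a region's memstore exceeds flushSize * multiplier, further mutations are
    // rejected with RegionTooBusyException until flushes bring the size back down.
    System.out.println("memstore blocking limit = " + (flushSize * multiplier) + " bytes");
  }
}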
2024-11-12T19:35:30,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:30,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:30,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440190783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:30,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:30,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440190786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:30,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-12T19:35:30,840 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:30,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-12T19:35:30,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:30,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:30,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:30,841 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:35:30,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:30,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:30,869 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:30,880 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111235db6c6e7e8d4bba9151a22a96c3066c_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111235db6c6e7e8d4bba9151a22a96c3066c_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:30,883 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/b6531ae6d0c14ee1a38c14b1b774f5d7, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:30,883 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/b6531ae6d0c14ee1a38c14b1b774f5d7 is 175, key is test_row_0/A:col10/1731440130108/Put/seqid=0 2024-11-12T19:35:30,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742408_1584 (size=39749) 2024-11-12T19:35:30,922 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=194, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/b6531ae6d0c14ee1a38c14b1b774f5d7 2024-11-12T19:35:30,929 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/38e61aabdd2543b793901ef13cc3b5a7 is 50, key is test_row_0/B:col10/1731440130108/Put/seqid=0 2024-11-12T19:35:30,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742409_1585 (size=12151) 2024-11-12T19:35:30,965 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/38e61aabdd2543b793901ef13cc3b5a7 2024-11-12T19:35:30,993 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/d8cf3e869e4a4907963db1aa9b36d82e is 50, key is test_row_0/C:col10/1731440130108/Put/seqid=0 
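Note on the MemStoreFlusher entries above: family A is flushed through DefaultMobStoreFlusher and its flushed cells are renamed under the mobdir path, i.e. the column family is MOB-enabled. The table descriptor itself is not part of this excerpt, so the block below is only a hedged sketch of how such a family is typically declared with the HBase 2.x admin API; the MOB threshold value is an assumption, not taken from the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      // MOB-enabled family: values above the threshold are written to the MOB area
      // (the mobdir/... files seen in the flush log) instead of the regular store files.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100)   // bytes; illustrative value only
          .build());
      // Families B and C behave as ordinary (non-MOB) stores in this excerpt.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
      admin.createTable(table.build());
    }
  }
}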
2024-11-12T19:35:31,005 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:31,007 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-12T19:35:31,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:31,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:31,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:31,007 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:31,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:35:31,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:31,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742410_1586 (size=12151) 2024-11-12T19:35:31,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:31,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440191087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:31,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:31,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440191089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:31,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-12T19:35:31,164 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:31,165 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-12T19:35:31,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:31,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:31,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:31,166 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
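Note on the Mutate calls rejected just above, together with the earlier RpcRetryingCallerImpl message ("tries=7, retries=16"): they are the client side of the same memstore back-pressure. HTable.put() keeps retrying through RpcRetryingCallerImpl until the region unblocks or the retry budget is spent. A minimal client sketch of that pattern follows; the retry and pause values use standard client keys and are set here only for illustration, not read from the test's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithRetriesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16); // matches the "retries=16" seen in the log
    conf.setLong("hbase.client.pause", 100);        // base backoff in ms between attempts (illustrative)
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Retries on RegionTooBusyException happen inside the client; the call only
      // surfaces the exception once the retry budget is exhausted.
      table.put(put);
    }
  }
}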
2024-11-12T19:35:31,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:31,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:31,320 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:31,320 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-12T19:35:31,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:31,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:31,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:31,321 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:31,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:31,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:35:31,435 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/d8cf3e869e4a4907963db1aa9b36d82e 2024-11-12T19:35:31,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/b6531ae6d0c14ee1a38c14b1b774f5d7 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/b6531ae6d0c14ee1a38c14b1b774f5d7 2024-11-12T19:35:31,473 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/b6531ae6d0c14ee1a38c14b1b774f5d7, entries=200, sequenceid=194, filesize=38.8 K 2024-11-12T19:35:31,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/38e61aabdd2543b793901ef13cc3b5a7 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/38e61aabdd2543b793901ef13cc3b5a7 2024-11-12T19:35:31,485 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:31,486 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-12T19:35:31,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:31,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:31,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:31,487 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:31,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:31,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:31,494 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/38e61aabdd2543b793901ef13cc3b5a7, entries=150, sequenceid=194, filesize=11.9 K 2024-11-12T19:35:31,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/d8cf3e869e4a4907963db1aa9b36d82e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/d8cf3e869e4a4907963db1aa9b36d82e 2024-11-12T19:35:31,501 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/d8cf3e869e4a4907963db1aa9b36d82e, entries=150, sequenceid=194, filesize=11.9 K 2024-11-12T19:35:31,512 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 9487c3b0150a5aa38a9544f87bbf2a6e in 1076ms, sequenceid=194, compaction requested=true 2024-11-12T19:35:31,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:31,512 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:31,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:A, priority=-2147483648, current under compaction store 
size is 1 2024-11-12T19:35:31,513 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110911 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:31,513 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 9487c3b0150a5aa38a9544f87bbf2a6e/A is initiating minor compaction (all files) 2024-11-12T19:35:31,513 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9487c3b0150a5aa38a9544f87bbf2a6e/A in TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:31,513 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/947004ae23044b70849162efb2f74aad, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/472174221a884593bb512ed834c8aa3b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/b6531ae6d0c14ee1a38c14b1b774f5d7] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp, totalSize=108.3 K 2024-11-12T19:35:31,513 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:31,514 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/947004ae23044b70849162efb2f74aad, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/472174221a884593bb512ed834c8aa3b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/b6531ae6d0c14ee1a38c14b1b774f5d7] 2024-11-12T19:35:31,514 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 947004ae23044b70849162efb2f74aad, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731440128246 2024-11-12T19:35:31,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:31,516 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 472174221a884593bb512ed834c8aa3b, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1731440129379 2024-11-12T19:35:31,516 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:31,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:31,517 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6531ae6d0c14ee1a38c14b1b774f5d7, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1731440130108 2024-11-12T19:35:31,518 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:31,518 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 9487c3b0150a5aa38a9544f87bbf2a6e/B is initiating minor compaction (all files) 2024-11-12T19:35:31,518 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9487c3b0150a5aa38a9544f87bbf2a6e/B in TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:31,518 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/fa9389f7041c4615a935e9b3df40e9b5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/2da858a8fef34bb6bbe172d65dd07a1e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/38e61aabdd2543b793901ef13cc3b5a7] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp, totalSize=35.9 K 2024-11-12T19:35:31,519 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting fa9389f7041c4615a935e9b3df40e9b5, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731440128246 2024-11-12T19:35:31,520 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 2da858a8fef34bb6bbe172d65dd07a1e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1731440129379 2024-11-12T19:35:31,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:35:31,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:31,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:35:31,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:31,521 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 38e61aabdd2543b793901ef13cc3b5a7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1731440130108 2024-11-12T19:35:31,523 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-12T19:35:31,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A 2024-11-12T19:35:31,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:31,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B 2024-11-12T19:35:31,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:31,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=C 2024-11-12T19:35:31,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; 
before=1, new segment=null 2024-11-12T19:35:31,541 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9487c3b0150a5aa38a9544f87bbf2a6e#B#compaction#495 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:31,541 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/901c536d8efa424bb57bba90119709ce is 50, key is test_row_0/B:col10/1731440130108/Put/seqid=0 2024-11-12T19:35:31,542 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:31,548 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411124d5fe0d86f3b440ca380d1a75d7b9443_9487c3b0150a5aa38a9544f87bbf2a6e store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:31,551 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411124d5fe0d86f3b440ca380d1a75d7b9443_9487c3b0150a5aa38a9544f87bbf2a6e, store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:31,551 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411124d5fe0d86f3b440ca380d1a75d7b9443_9487c3b0150a5aa38a9544f87bbf2a6e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:31,571 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112cc739871163846d8b2049e4788847392_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440130452/Put/seqid=0 2024-11-12T19:35:31,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742411_1587 (size=12561) 2024-11-12T19:35:31,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742412_1588 (size=4469) 2024-11-12T19:35:31,596 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/901c536d8efa424bb57bba90119709ce as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/901c536d8efa424bb57bba90119709ce 2024-11-12T19:35:31,599 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9487c3b0150a5aa38a9544f87bbf2a6e#A#compaction#496 
average throughput is 0.43 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:31,600 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/8fa3a12dae184876a6ce6272759d3be4 is 175, key is test_row_0/A:col10/1731440130108/Put/seqid=0 2024-11-12T19:35:31,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742413_1589 (size=12304) 2024-11-12T19:35:31,603 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:31,604 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9487c3b0150a5aa38a9544f87bbf2a6e/B of 9487c3b0150a5aa38a9544f87bbf2a6e into 901c536d8efa424bb57bba90119709ce(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:35:31,604 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:31,604 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., storeName=9487c3b0150a5aa38a9544f87bbf2a6e/B, priority=13, startTime=1731440131516; duration=0sec 2024-11-12T19:35:31,604 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:31,604 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:B 2024-11-12T19:35:31,604 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-12T19:35:31,605 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-12T19:35:31,605 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-12T19:35:31,605 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
because compaction request was cancelled 2024-11-12T19:35:31,605 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:C 2024-11-12T19:35:31,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742414_1590 (size=31515) 2024-11-12T19:35:31,611 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112cc739871163846d8b2049e4788847392_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112cc739871163846d8b2049e4788847392_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:31,612 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/34d108e4bbf34a4ebe881e44b15828c8, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:31,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/34d108e4bbf34a4ebe881e44b15828c8 is 175, key is test_row_0/A:col10/1731440130452/Put/seqid=0 2024-11-12T19:35:31,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742415_1591 (size=31105) 2024-11-12T19:35:31,634 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=210, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/34d108e4bbf34a4ebe881e44b15828c8 2024-11-12T19:35:31,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-12T19:35:31,640 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:31,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:31,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:31,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440191635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:31,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440191635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:31,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:31,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440191637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:31,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:31,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440191637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:31,642 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-12T19:35:31,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:31,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:31,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:31,642 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:35:31,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:31,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:31,651 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/484b5735902c4f7d9419000fbc627b46 is 50, key is test_row_0/B:col10/1731440130452/Put/seqid=0 2024-11-12T19:35:31,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742416_1592 (size=12151) 2024-11-12T19:35:31,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:31,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440191742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:31,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:31,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440191744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:31,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:31,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440191744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:31,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:31,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440191747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:31,799 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:31,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-12T19:35:31,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:31,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:31,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:31,799 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:31,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:31,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:31,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:31,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440191947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:31,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:31,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440191951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:31,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:31,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440191951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:31,956 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:31,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-12T19:35:31,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:31,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:31,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:31,957 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:31,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:35:31,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:31,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:31,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440191955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:32,034 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/8fa3a12dae184876a6ce6272759d3be4 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/8fa3a12dae184876a6ce6272759d3be4 2024-11-12T19:35:32,052 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9487c3b0150a5aa38a9544f87bbf2a6e/A of 9487c3b0150a5aa38a9544f87bbf2a6e into 8fa3a12dae184876a6ce6272759d3be4(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:35:32,052 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:32,052 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., storeName=9487c3b0150a5aa38a9544f87bbf2a6e/A, priority=13, startTime=1731440131512; duration=0sec 2024-11-12T19:35:32,052 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:32,052 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:A 2024-11-12T19:35:32,095 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/484b5735902c4f7d9419000fbc627b46 2024-11-12T19:35:32,108 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/cf40548692584de1aac3b03393d8ea5c is 50, key is test_row_0/C:col10/1731440130452/Put/seqid=0 2024-11-12T19:35:32,109 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:32,110 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-12T19:35:32,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:32,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:32,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:32,112 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:32,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:32,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:32,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742417_1593 (size=12151) 2024-11-12T19:35:32,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:32,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440192252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:32,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:32,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440192255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:32,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:32,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440192256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:32,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:32,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440192261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:32,263 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:32,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-12T19:35:32,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:32,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:32,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:32,264 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:35:32,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:32,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:32,416 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:32,416 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-12T19:35:32,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:32,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:32,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:32,416 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:32,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:32,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:35:32,525 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/cf40548692584de1aac3b03393d8ea5c 2024-11-12T19:35:32,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/34d108e4bbf34a4ebe881e44b15828c8 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/34d108e4bbf34a4ebe881e44b15828c8 2024-11-12T19:35:32,544 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/34d108e4bbf34a4ebe881e44b15828c8, entries=150, sequenceid=210, filesize=30.4 K 2024-11-12T19:35:32,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/484b5735902c4f7d9419000fbc627b46 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/484b5735902c4f7d9419000fbc627b46 2024-11-12T19:35:32,550 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/484b5735902c4f7d9419000fbc627b46, entries=150, sequenceid=210, filesize=11.9 K 2024-11-12T19:35:32,554 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/cf40548692584de1aac3b03393d8ea5c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/cf40548692584de1aac3b03393d8ea5c 2024-11-12T19:35:32,558 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/cf40548692584de1aac3b03393d8ea5c, entries=150, sequenceid=210, filesize=11.9 K 2024-11-12T19:35:32,559 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 9487c3b0150a5aa38a9544f87bbf2a6e in 1036ms, sequenceid=210, compaction requested=true 2024-11-12T19:35:32,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:32,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:35:32,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:32,559 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-12T19:35:32,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:35:32,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:32,560 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-12T19:35:32,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:35:32,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:32,560 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-12T19:35:32,560 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-12T19:35:32,560 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-12T19:35:32,560 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-12T19:35:32,560 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. because compaction request was cancelled 2024-11-12T19:35:32,560 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
because compaction request was cancelled 2024-11-12T19:35:32,560 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:A 2024-11-12T19:35:32,560 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:B 2024-11-12T19:35:32,560 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:32,561 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:32,561 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 9487c3b0150a5aa38a9544f87bbf2a6e/C is initiating minor compaction (all files) 2024-11-12T19:35:32,561 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9487c3b0150a5aa38a9544f87bbf2a6e/C in TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:32,561 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f597621b57634d52b1933ed984b95d33, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/d8cf3e869e4a4907963db1aa9b36d82e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/cf40548692584de1aac3b03393d8ea5c] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp, totalSize=35.9 K 2024-11-12T19:35:32,561 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting f597621b57634d52b1933ed984b95d33, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1731440129379 2024-11-12T19:35:32,561 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8cf3e869e4a4907963db1aa9b36d82e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1731440130108 2024-11-12T19:35:32,561 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf40548692584de1aac3b03393d8ea5c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1731440130452 2024-11-12T19:35:32,568 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:32,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-12T19:35:32,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:32,569 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-12T19:35:32,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A 2024-11-12T19:35:32,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:32,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B 2024-11-12T19:35:32,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:32,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=C 2024-11-12T19:35:32,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:32,570 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9487c3b0150a5aa38a9544f87bbf2a6e#C#compaction#500 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:32,571 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/aeb5a51d5a6244519adf77280ea85fe1 is 50, key is test_row_0/C:col10/1731440130452/Put/seqid=0 2024-11-12T19:35:32,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742418_1594 (size=12595) 2024-11-12T19:35:32,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112614a57468fe7468f9b4570882c558971_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440131627/Put/seqid=0 2024-11-12T19:35:32,601 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/aeb5a51d5a6244519adf77280ea85fe1 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/aeb5a51d5a6244519adf77280ea85fe1 2024-11-12T19:35:32,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742419_1595 (size=12304) 2024-11-12T19:35:32,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:32,606 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9487c3b0150a5aa38a9544f87bbf2a6e/C of 9487c3b0150a5aa38a9544f87bbf2a6e into aeb5a51d5a6244519adf77280ea85fe1(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
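
The compaction records above show the exploring policy accepting all three C-family store files as a single "in ratio" permutation (3 files, 36795 bytes) and completing the minor compaction under the PressureAwareThroughputController's 50.00 MB/second cap. The Java sketch below illustrates the kind of size-ratio test such a selection applies; it is a simplified stand-in rather than HBase's actual ExploringCompactionPolicy code, and the per-file byte sizes are assumptions chosen only so that they sum to the 36795 bytes reported above.

    import java.util.List;

    public class RatioCheckSketch {
        // Simplified sketch (not HBase's real implementation) of an "in ratio" test:
        // every file in the candidate set must be no larger than `ratio` times the
        // combined size of the other files, so no single file dominates the rewrite.
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false; // one oversized file makes the permutation "not in ratio"
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Illustrative sizes (~12.2 K + 11.9 K + 11.9 K) summing to the 36795 bytes in the log.
            List<Long> sizes = List.of(12493L, 12186L, 12116L);
            System.out.println(filesInRatio(sizes, 1.2)); // 1.2 is the usual hbase.hstore.compaction.ratio
        }
    }

With a ratio of 1.2 none of the three files outweighs the other two, so the permutation is accepted, which is consistent with the single permutation the log reports being considered.
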
2024-11-12T19:35:32,606 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:32,606 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., storeName=9487c3b0150a5aa38a9544f87bbf2a6e/C, priority=13, startTime=1731440132560; duration=0sec 2024-11-12T19:35:32,606 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112614a57468fe7468f9b4570882c558971_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112614a57468fe7468f9b4570882c558971_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:32,607 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:32,607 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:C 2024-11-12T19:35:32,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/132059ce740f4c4ca4d08f69a5ac5526, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:32,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/132059ce740f4c4ca4d08f69a5ac5526 is 175, key is test_row_0/A:col10/1731440131627/Put/seqid=0 2024-11-12T19:35:32,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742420_1596 (size=31105) 2024-11-12T19:35:32,612 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=235, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/132059ce740f4c4ca4d08f69a5ac5526 2024-11-12T19:35:32,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/51af0b237f0d4d198bc823ec6c935642 is 50, key is test_row_0/B:col10/1731440131627/Put/seqid=0 2024-11-12T19:35:32,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43501 is added to blk_1073742421_1597 (size=12151) 2024-11-12T19:35:32,624 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/51af0b237f0d4d198bc823ec6c935642 2024-11-12T19:35:32,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/989458e246c14a3d85fa52471640a358 is 50, key is test_row_0/C:col10/1731440131627/Put/seqid=0 2024-11-12T19:35:32,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742422_1598 (size=12151) 2024-11-12T19:35:32,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-12T19:35:32,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:32,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:32,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:32,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440192766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:32,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:32,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440192767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:32,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:32,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440192768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:32,772 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:32,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440192768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:32,872 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:32,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440192871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:32,872 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:32,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440192871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:32,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:32,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440192872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:32,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:32,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440192872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:33,035 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/989458e246c14a3d85fa52471640a358 2024-11-12T19:35:33,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/132059ce740f4c4ca4d08f69a5ac5526 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/132059ce740f4c4ca4d08f69a5ac5526 2024-11-12T19:35:33,041 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/132059ce740f4c4ca4d08f69a5ac5526, entries=150, sequenceid=235, filesize=30.4 K 2024-11-12T19:35:33,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/51af0b237f0d4d198bc823ec6c935642 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/51af0b237f0d4d198bc823ec6c935642 2024-11-12T19:35:33,044 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/51af0b237f0d4d198bc823ec6c935642, entries=150, sequenceid=235, filesize=11.9 K 2024-11-12T19:35:33,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/989458e246c14a3d85fa52471640a358 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/989458e246c14a3d85fa52471640a358 2024-11-12T19:35:33,046 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/989458e246c14a3d85fa52471640a358, entries=150, sequenceid=235, filesize=11.9 K 2024-11-12T19:35:33,047 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 9487c3b0150a5aa38a9544f87bbf2a6e in 478ms, sequenceid=235, compaction requested=true 2024-11-12T19:35:33,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:33,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
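
The flush above drains ~134 KB from region 9487c3b0150a5aa38a9544f87bbf2a6e, while the interleaved RegionTooBusyException warnings show writers being rejected because the region's memstore is over its 512.0 K blocking limit (the per-region flush size multiplied by the block multiplier). The sketch below is a hypothetical client-side view of that interaction, not the test's code: the two configuration values are assumptions picked only so their product matches the 512 K limit in this log, and the table, row, and column-family names are taken from the records above. Note that the stock HBase client also retries RegionTooBusyException on its own and, depending on settings, may surface it wrapped in a retries-exhausted exception.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MemstoreBackoffSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Assumed values: blocking limit = flush.size * block.multiplier = 128 K * 4 = 512 K,
            // matching the "Over memstore limit=512.0 K" seen in this test run.
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 0; attempt < 5; attempt++) {
                    try {
                        table.put(put);
                        break; // write accepted once the memstore has drained below the limit
                    } catch (RegionTooBusyException e) {
                        // Region is blocking writes until its flush completes; back off and retry.
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;
                    }
                }
            }
        }
    }
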
2024-11-12T19:35:33,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-11-12T19:35:33,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-11-12T19:35:33,049 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-12T19:35:33,049 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5180 sec 2024-11-12T19:35:33,050 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 2.5230 sec 2024-11-12T19:35:33,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:33,075 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-12T19:35:33,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A 2024-11-12T19:35:33,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:33,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B 2024-11-12T19:35:33,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:33,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=C 2024-11-12T19:35:33,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:33,081 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112a2d66723aa9447a88c2dca65493ce772_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440133075/Put/seqid=0 2024-11-12T19:35:33,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742423_1599 (size=17284) 2024-11-12T19:35:33,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:33,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440193090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:33,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:33,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440193092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:33,095 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:33,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440193093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:33,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:33,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440193093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:33,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:33,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440193194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:33,197 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:33,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440193196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:33,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:33,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440193196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:33,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:33,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440193196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:33,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:33,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440193398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:33,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:33,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440193399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:33,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:33,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440193399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:33,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:33,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440193399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:33,486 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:33,488 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112a2d66723aa9447a88c2dca65493ce772_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112a2d66723aa9447a88c2dca65493ce772_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:33,489 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/5e78cb6fc0c2469d9330515e413b5d6e, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:33,490 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/5e78cb6fc0c2469d9330515e413b5d6e is 175, key is test_row_0/A:col10/1731440133075/Put/seqid=0 2024-11-12T19:35:33,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742424_1600 (size=48389) 2024-11-12T19:35:33,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:33,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440193700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:33,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:33,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440193702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:33,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:33,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440193704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:33,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:33,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440193704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:33,893 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=251, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/5e78cb6fc0c2469d9330515e413b5d6e 2024-11-12T19:35:33,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/664f74bb44a4470f957dd8b533c8cdae is 50, key is test_row_0/B:col10/1731440133075/Put/seqid=0 2024-11-12T19:35:33,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742425_1601 (size=12151) 2024-11-12T19:35:33,901 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/664f74bb44a4470f957dd8b533c8cdae 2024-11-12T19:35:33,906 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/e03b9aa7e025433189df61d13f183f76 is 50, key is test_row_0/C:col10/1731440133075/Put/seqid=0 2024-11-12T19:35:33,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742426_1602 (size=12151) 2024-11-12T19:35:34,004 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-12T19:35:34,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:34,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440194202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:34,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:34,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440194207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:34,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:34,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440194208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:34,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:34,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440194211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:34,310 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/e03b9aa7e025433189df61d13f183f76 2024-11-12T19:35:34,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/5e78cb6fc0c2469d9330515e413b5d6e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/5e78cb6fc0c2469d9330515e413b5d6e 2024-11-12T19:35:34,315 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/5e78cb6fc0c2469d9330515e413b5d6e, entries=250, sequenceid=251, filesize=47.3 K 2024-11-12T19:35:34,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/664f74bb44a4470f957dd8b533c8cdae as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/664f74bb44a4470f957dd8b533c8cdae 2024-11-12T19:35:34,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/664f74bb44a4470f957dd8b533c8cdae, entries=150, sequenceid=251, filesize=11.9 K 2024-11-12T19:35:34,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/e03b9aa7e025433189df61d13f183f76 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/e03b9aa7e025433189df61d13f183f76 2024-11-12T19:35:34,320 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/e03b9aa7e025433189df61d13f183f76, entries=150, sequenceid=251, filesize=11.9 K 2024-11-12T19:35:34,320 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 9487c3b0150a5aa38a9544f87bbf2a6e in 1245ms, sequenceid=251, compaction requested=true 2024-11-12T19:35:34,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:34,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:35:34,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:34,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:35:34,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:34,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:35:34,321 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:35:34,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-12T19:35:34,321 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-12T19:35:34,323 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:35:34,323 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142114 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-12T19:35:34,323 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 9487c3b0150a5aa38a9544f87bbf2a6e/B is initiating minor compaction (all files) 2024-11-12T19:35:34,323 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 9487c3b0150a5aa38a9544f87bbf2a6e/A is initiating minor compaction (all files) 2024-11-12T19:35:34,323 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9487c3b0150a5aa38a9544f87bbf2a6e/B in TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:34,323 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9487c3b0150a5aa38a9544f87bbf2a6e/A in TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:34,323 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/901c536d8efa424bb57bba90119709ce, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/484b5735902c4f7d9419000fbc627b46, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/51af0b237f0d4d198bc823ec6c935642, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/664f74bb44a4470f957dd8b533c8cdae] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp, totalSize=47.9 K 2024-11-12T19:35:34,323 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/8fa3a12dae184876a6ce6272759d3be4, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/34d108e4bbf34a4ebe881e44b15828c8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/132059ce740f4c4ca4d08f69a5ac5526, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/5e78cb6fc0c2469d9330515e413b5d6e] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp, totalSize=138.8 K 2024-11-12T19:35:34,323 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:34,323 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/8fa3a12dae184876a6ce6272759d3be4, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/34d108e4bbf34a4ebe881e44b15828c8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/132059ce740f4c4ca4d08f69a5ac5526, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/5e78cb6fc0c2469d9330515e413b5d6e] 2024-11-12T19:35:34,323 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 901c536d8efa424bb57bba90119709ce, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1731440130108 2024-11-12T19:35:34,324 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8fa3a12dae184876a6ce6272759d3be4, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1731440130108 2024-11-12T19:35:34,324 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 484b5735902c4f7d9419000fbc627b46, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1731440130452 2024-11-12T19:35:34,324 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34d108e4bbf34a4ebe881e44b15828c8, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1731440130452 2024-11-12T19:35:34,324 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 51af0b237f0d4d198bc823ec6c935642, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1731440131627 2024-11-12T19:35:34,324 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 132059ce740f4c4ca4d08f69a5ac5526, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1731440131627 2024-11-12T19:35:34,324 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 664f74bb44a4470f957dd8b533c8cdae, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731440132762 2024-11-12T19:35:34,324 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e78cb6fc0c2469d9330515e413b5d6e, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731440132762 2024-11-12T19:35:34,329 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:34,331 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241112e0d6d02455984dc5b41cadf7fac2923b_9487c3b0150a5aa38a9544f87bbf2a6e store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:34,331 INFO 
[RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9487c3b0150a5aa38a9544f87bbf2a6e#B#compaction#507 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:34,331 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/27c3e80795de41f0b451fa7db8214e8c is 50, key is test_row_0/B:col10/1731440133075/Put/seqid=0 2024-11-12T19:35:34,332 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241112e0d6d02455984dc5b41cadf7fac2923b_9487c3b0150a5aa38a9544f87bbf2a6e, store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:34,333 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112e0d6d02455984dc5b41cadf7fac2923b_9487c3b0150a5aa38a9544f87bbf2a6e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:34,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742427_1603 (size=12697) 2024-11-12T19:35:34,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742428_1604 (size=4469) 2024-11-12T19:35:34,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-12T19:35:34,637 INFO [Thread-2409 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-11-12T19:35:34,637 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:35:34,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-11-12T19:35:34,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-12T19:35:34,639 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:35:34,639 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:35:34,639 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:35:34,739 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-12T19:35:34,752 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9487c3b0150a5aa38a9544f87bbf2a6e#A#compaction#508 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:34,752 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/9dad5d6f1f3d4173a78178296ee371c0 is 175, key is test_row_0/A:col10/1731440133075/Put/seqid=0 2024-11-12T19:35:34,754 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/27c3e80795de41f0b451fa7db8214e8c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/27c3e80795de41f0b451fa7db8214e8c 2024-11-12T19:35:34,759 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9487c3b0150a5aa38a9544f87bbf2a6e/B of 9487c3b0150a5aa38a9544f87bbf2a6e into 27c3e80795de41f0b451fa7db8214e8c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:35:34,759 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:34,759 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., storeName=9487c3b0150a5aa38a9544f87bbf2a6e/B, priority=12, startTime=1731440134321; duration=0sec 2024-11-12T19:35:34,759 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:34,759 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:B 2024-11-12T19:35:34,760 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:34,760 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:34,760 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 9487c3b0150a5aa38a9544f87bbf2a6e/C is initiating minor compaction (all files) 2024-11-12T19:35:34,760 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9487c3b0150a5aa38a9544f87bbf2a6e/C in TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:34,760 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/aeb5a51d5a6244519adf77280ea85fe1, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/989458e246c14a3d85fa52471640a358, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/e03b9aa7e025433189df61d13f183f76] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp, totalSize=36.0 K 2024-11-12T19:35:34,761 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting aeb5a51d5a6244519adf77280ea85fe1, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1731440130452 2024-11-12T19:35:34,761 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 989458e246c14a3d85fa52471640a358, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1731440131627 2024-11-12T19:35:34,761 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting e03b9aa7e025433189df61d13f183f76, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731440132762 2024-11-12T19:35:34,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742429_1605 (size=31651) 2024-11-12T19:35:34,776 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9487c3b0150a5aa38a9544f87bbf2a6e#C#compaction#509 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:34,777 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/dd2506c1ff1d401fa7729b075134384f is 50, key is test_row_0/C:col10/1731440133075/Put/seqid=0 2024-11-12T19:35:34,779 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/9dad5d6f1f3d4173a78178296ee371c0 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/9dad5d6f1f3d4173a78178296ee371c0 2024-11-12T19:35:34,785 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9487c3b0150a5aa38a9544f87bbf2a6e/A of 9487c3b0150a5aa38a9544f87bbf2a6e into 9dad5d6f1f3d4173a78178296ee371c0(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:35:34,785 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:34,785 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., storeName=9487c3b0150a5aa38a9544f87bbf2a6e/A, priority=12, startTime=1731440134320; duration=0sec 2024-11-12T19:35:34,785 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:34,785 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:A 2024-11-12T19:35:34,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742430_1606 (size=12697) 2024-11-12T19:35:34,791 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:34,792 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/dd2506c1ff1d401fa7729b075134384f as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/dd2506c1ff1d401fa7729b075134384f 2024-11-12T19:35:34,792 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-12T19:35:34,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:34,792 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-12T19:35:34,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A 2024-11-12T19:35:34,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:34,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B 2024-11-12T19:35:34,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:34,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=C 2024-11-12T19:35:34,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:34,798 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9487c3b0150a5aa38a9544f87bbf2a6e/C of 9487c3b0150a5aa38a9544f87bbf2a6e into dd2506c1ff1d401fa7729b075134384f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:35:34,798 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:34,799 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., storeName=9487c3b0150a5aa38a9544f87bbf2a6e/C, priority=13, startTime=1731440134321; duration=0sec 2024-11-12T19:35:34,799 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:34,799 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:C 2024-11-12T19:35:34,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111206c245cda26e4ea99073934d7bd14fd9_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440133090/Put/seqid=0 2024-11-12T19:35:34,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742431_1607 (size=12454) 2024-11-12T19:35:34,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:34,835 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111206c245cda26e4ea99073934d7bd14fd9_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111206c245cda26e4ea99073934d7bd14fd9_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:34,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/5e5a2276abc3411898ae7e78a2b7f172, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:34,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/5e5a2276abc3411898ae7e78a2b7f172 is 175, key is test_row_0/A:col10/1731440133090/Put/seqid=0 2024-11-12T19:35:34,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742432_1608 (size=31255) 2024-11-12T19:35:34,940 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-12T19:35:35,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:35,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:35,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-12T19:35:35,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440195237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:35,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440195238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:35,245 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440195242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:35,245 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440195243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:35,255 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=274, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/5e5a2276abc3411898ae7e78a2b7f172 2024-11-12T19:35:35,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/35926df7e443488d8ea8da900b8ef04c is 50, key is test_row_0/B:col10/1731440133090/Put/seqid=0 2024-11-12T19:35:35,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742433_1609 (size=12301) 2024-11-12T19:35:35,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440195343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:35,346 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440195345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:35,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440195346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:35,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440195348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:35,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440195551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:35,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440195551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:35,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440195552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:35,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440195559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:35,689 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/35926df7e443488d8ea8da900b8ef04c 2024-11-12T19:35:35,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/5b5923dc058f49a487ca933052ee7383 is 50, key is test_row_0/C:col10/1731440133090/Put/seqid=0 2024-11-12T19:35:35,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-12T19:35:35,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742434_1610 (size=12301) 2024-11-12T19:35:35,771 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/5b5923dc058f49a487ca933052ee7383 2024-11-12T19:35:35,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/5e5a2276abc3411898ae7e78a2b7f172 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/5e5a2276abc3411898ae7e78a2b7f172 2024-11-12T19:35:35,826 INFO 
[RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/5e5a2276abc3411898ae7e78a2b7f172, entries=150, sequenceid=274, filesize=30.5 K 2024-11-12T19:35:35,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/35926df7e443488d8ea8da900b8ef04c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/35926df7e443488d8ea8da900b8ef04c 2024-11-12T19:35:35,834 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/35926df7e443488d8ea8da900b8ef04c, entries=150, sequenceid=274, filesize=12.0 K 2024-11-12T19:35:35,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/5b5923dc058f49a487ca933052ee7383 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/5b5923dc058f49a487ca933052ee7383 2024-11-12T19:35:35,843 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/5b5923dc058f49a487ca933052ee7383, entries=150, sequenceid=274, filesize=12.0 K 2024-11-12T19:35:35,844 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 9487c3b0150a5aa38a9544f87bbf2a6e in 1052ms, sequenceid=274, compaction requested=false 2024-11-12T19:35:35,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:35,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:35,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-11-12T19:35:35,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-11-12T19:35:35,846 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-11-12T19:35:35,846 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2060 sec 2024-11-12T19:35:35,847 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 1.2090 sec 2024-11-12T19:35:35,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:35,857 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-12T19:35:35,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A 2024-11-12T19:35:35,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:35,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B 2024-11-12T19:35:35,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:35,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=C 2024-11-12T19:35:35,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:35,867 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112fabd77123b5e4720ba675c2fa44edc97_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440135857/Put/seqid=0 2024-11-12T19:35:35,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742435_1611 (size=14994) 2024-11-12T19:35:35,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440195880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:35,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440195879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:35,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440195880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:35,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440195881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:35,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440195984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:35,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440195984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:35,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440195985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:35,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:35,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440195985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:36,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:36,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440196187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:36,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:36,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440196187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:36,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:36,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440196188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:36,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:36,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440196188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:36,273 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:36,277 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112fabd77123b5e4720ba675c2fa44edc97_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112fabd77123b5e4720ba675c2fa44edc97_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:36,280 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/acef769068194dc6854ffb8685cd81a6, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:36,281 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/acef769068194dc6854ffb8685cd81a6 is 175, key is test_row_0/A:col10/1731440135857/Put/seqid=0 2024-11-12T19:35:36,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742436_1612 (size=39949) 2024-11-12T19:35:36,493 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:36,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440196490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:36,493 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:36,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440196491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:36,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:36,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440196492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:36,507 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:36,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440196503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:36,702 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=292, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/acef769068194dc6854ffb8685cd81a6 2024-11-12T19:35:36,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/74d19d0c413a446987faab342137adf9 is 50, key is test_row_0/B:col10/1731440135857/Put/seqid=0 2024-11-12T19:35:36,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-12T19:35:36,748 INFO [Thread-2409 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-11-12T19:35:36,752 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:35:36,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-12T19:35:36,753 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:35:36,754 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:35:36,754 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:35:36,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-12T19:35:36,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742437_1613 
(size=12301) 2024-11-12T19:35:36,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-12T19:35:36,905 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:36,906 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-12T19:35:36,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:36,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:36,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:36,906 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:36,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:36,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:36,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:36,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440196995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:36,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:36,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440196996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:36,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:36,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440196997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:37,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:37,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440197008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:37,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-12T19:35:37,059 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:37,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-12T19:35:37,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:37,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:37,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:37,060 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:35:37,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:37,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:37,183 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/74d19d0c413a446987faab342137adf9 2024-11-12T19:35:37,189 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/f7eefab1b1f840e69c098c514d34a664 is 50, key is test_row_0/C:col10/1731440135857/Put/seqid=0 2024-11-12T19:35:37,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742438_1614 (size=12301) 2024-11-12T19:35:37,194 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/f7eefab1b1f840e69c098c514d34a664 2024-11-12T19:35:37,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/acef769068194dc6854ffb8685cd81a6 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/acef769068194dc6854ffb8685cd81a6 2024-11-12T19:35:37,203 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/acef769068194dc6854ffb8685cd81a6, entries=200, sequenceid=292, filesize=39.0 K 2024-11-12T19:35:37,204 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/74d19d0c413a446987faab342137adf9 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/74d19d0c413a446987faab342137adf9 2024-11-12T19:35:37,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/74d19d0c413a446987faab342137adf9, entries=150, sequenceid=292, filesize=12.0 K 2024-11-12T19:35:37,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/f7eefab1b1f840e69c098c514d34a664 as 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f7eefab1b1f840e69c098c514d34a664 2024-11-12T19:35:37,210 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f7eefab1b1f840e69c098c514d34a664, entries=150, sequenceid=292, filesize=12.0 K 2024-11-12T19:35:37,210 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 9487c3b0150a5aa38a9544f87bbf2a6e in 1353ms, sequenceid=292, compaction requested=true 2024-11-12T19:35:37,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:37,211 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:37,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:A, priority=-2147483648, current under compaction store size is 1 2024-11-12T19:35:37,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:37,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:35:37,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:37,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:35:37,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-12T19:35:37,212 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:37,212 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-12T19:35:37,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:37,212 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-12T19:35:37,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A 2024-11-12T19:35:37,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:37,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B 2024-11-12T19:35:37,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:37,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=C 2024-11-12T19:35:37,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:37,212 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:37,213 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:37,213 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102855 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:37,213 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 9487c3b0150a5aa38a9544f87bbf2a6e/B is initiating minor compaction (all files) 2024-11-12T19:35:37,213 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 9487c3b0150a5aa38a9544f87bbf2a6e/A is initiating minor compaction (all files) 2024-11-12T19:35:37,213 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9487c3b0150a5aa38a9544f87bbf2a6e/B in TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:37,213 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9487c3b0150a5aa38a9544f87bbf2a6e/A in TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:37,213 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/27c3e80795de41f0b451fa7db8214e8c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/35926df7e443488d8ea8da900b8ef04c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/74d19d0c413a446987faab342137adf9] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp, totalSize=36.4 K 2024-11-12T19:35:37,213 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/9dad5d6f1f3d4173a78178296ee371c0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/5e5a2276abc3411898ae7e78a2b7f172, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/acef769068194dc6854ffb8685cd81a6] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp, totalSize=100.4 K 2024-11-12T19:35:37,213 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:37,213 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/9dad5d6f1f3d4173a78178296ee371c0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/5e5a2276abc3411898ae7e78a2b7f172, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/acef769068194dc6854ffb8685cd81a6] 2024-11-12T19:35:37,214 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27c3e80795de41f0b451fa7db8214e8c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731440132762 2024-11-12T19:35:37,214 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35926df7e443488d8ea8da900b8ef04c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1731440133088 2024-11-12T19:35:37,214 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 9dad5d6f1f3d4173a78178296ee371c0, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731440132762 2024-11-12T19:35:37,215 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 74d19d0c413a446987faab342137adf9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1731440135229 2024-11-12T19:35:37,215 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e5a2276abc3411898ae7e78a2b7f172, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1731440133088 2024-11-12T19:35:37,215 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting acef769068194dc6854ffb8685cd81a6, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1731440135229 2024-11-12T19:35:37,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111282c64ccc643a4298bfbe79cf9ca134c1_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440135879/Put/seqid=0 2024-11-12T19:35:37,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742439_1615 (size=12454) 2024-11-12T19:35:37,222 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9487c3b0150a5aa38a9544f87bbf2a6e#B#compaction#517 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:37,222 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/6ec7fbe0b22549b4afad04ec7c801d81 is 50, key is test_row_0/B:col10/1731440135857/Put/seqid=0 2024-11-12T19:35:37,223 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:37,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742440_1616 (size=12949) 2024-11-12T19:35:37,231 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241112ae2d014323ef4a538a11ce3f59793aac_9487c3b0150a5aa38a9544f87bbf2a6e store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:37,233 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241112ae2d014323ef4a538a11ce3f59793aac_9487c3b0150a5aa38a9544f87bbf2a6e, store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:37,233 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112ae2d014323ef4a538a11ce3f59793aac_9487c3b0150a5aa38a9544f87bbf2a6e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:37,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742441_1617 (size=4469) 2024-11-12T19:35:37,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-12T19:35:37,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:37,639 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111282c64ccc643a4298bfbe79cf9ca134c1_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111282c64ccc643a4298bfbe79cf9ca134c1_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:37,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/7868c95b9b964e09bbad2eaead237e9f, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:37,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/7868c95b9b964e09bbad2eaead237e9f is 175, key is test_row_0/A:col10/1731440135879/Put/seqid=0 2024-11-12T19:35:37,643 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9487c3b0150a5aa38a9544f87bbf2a6e#A#compaction#518 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:37,643 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/7b32cd39fb9f498f942f9d18a01ef26e is 175, key is test_row_0/A:col10/1731440135857/Put/seqid=0 2024-11-12T19:35:37,644 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/6ec7fbe0b22549b4afad04ec7c801d81 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/6ec7fbe0b22549b4afad04ec7c801d81 2024-11-12T19:35:37,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742443_1619 (size=31903) 2024-11-12T19:35:37,675 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9487c3b0150a5aa38a9544f87bbf2a6e/B of 9487c3b0150a5aa38a9544f87bbf2a6e into 6ec7fbe0b22549b4afad04ec7c801d81(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:35:37,675 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:37,675 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., storeName=9487c3b0150a5aa38a9544f87bbf2a6e/B, priority=13, startTime=1731440137211; duration=0sec 2024-11-12T19:35:37,675 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:37,675 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:B 2024-11-12T19:35:37,675 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:37,679 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:37,679 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 9487c3b0150a5aa38a9544f87bbf2a6e/C is initiating minor compaction (all files) 2024-11-12T19:35:37,679 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9487c3b0150a5aa38a9544f87bbf2a6e/C in TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:37,679 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/dd2506c1ff1d401fa7729b075134384f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/5b5923dc058f49a487ca933052ee7383, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f7eefab1b1f840e69c098c514d34a664] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp, totalSize=36.4 K 2024-11-12T19:35:37,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742442_1618 (size=31255) 2024-11-12T19:35:37,680 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd2506c1ff1d401fa7729b075134384f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731440132762 2024-11-12T19:35:37,680 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b5923dc058f49a487ca933052ee7383, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1731440133088 2024-11-12T19:35:37,680 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7eefab1b1f840e69c098c514d34a664, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=292, earliestPutTs=1731440135229 2024-11-12T19:35:37,691 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9487c3b0150a5aa38a9544f87bbf2a6e#C#compaction#519 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:37,691 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/bf63bc594b8443898d52464b5b221065 is 50, key is test_row_0/C:col10/1731440135857/Put/seqid=0 2024-11-12T19:35:37,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742444_1620 (size=12949) 2024-11-12T19:35:37,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-12T19:35:38,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:38,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:38,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:38,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440198041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:38,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:38,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440198042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:38,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:38,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440198043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:38,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:38,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440198045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:38,079 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/7b32cd39fb9f498f942f9d18a01ef26e as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/7b32cd39fb9f498f942f9d18a01ef26e 2024-11-12T19:35:38,080 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=311, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/7868c95b9b964e09bbad2eaead237e9f 2024-11-12T19:35:38,092 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9487c3b0150a5aa38a9544f87bbf2a6e/A of 9487c3b0150a5aa38a9544f87bbf2a6e into 7b32cd39fb9f498f942f9d18a01ef26e(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:35:38,092 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:38,092 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., storeName=9487c3b0150a5aa38a9544f87bbf2a6e/A, priority=13, startTime=1731440137211; duration=0sec 2024-11-12T19:35:38,092 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:38,092 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:A 2024-11-12T19:35:38,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/d18164f5f0e0430e98ecd7073b4d368b is 50, key is test_row_0/B:col10/1731440135879/Put/seqid=0 2024-11-12T19:35:38,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742445_1621 (size=12301) 2024-11-12T19:35:38,128 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/bf63bc594b8443898d52464b5b221065 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/bf63bc594b8443898d52464b5b221065 2024-11-12T19:35:38,134 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9487c3b0150a5aa38a9544f87bbf2a6e/C of 9487c3b0150a5aa38a9544f87bbf2a6e into bf63bc594b8443898d52464b5b221065(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:35:38,134 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:38,134 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., storeName=9487c3b0150a5aa38a9544f87bbf2a6e/C, priority=13, startTime=1731440137211; duration=0sec 2024-11-12T19:35:38,134 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:38,134 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:C 2024-11-12T19:35:38,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:38,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440198146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:38,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:38,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440198147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:38,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:38,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440198153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:38,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:38,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440198154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:38,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:38,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440198348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:38,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:38,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440198349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:38,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:38,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440198356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:38,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:38,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440198356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:38,522 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/d18164f5f0e0430e98ecd7073b4d368b 2024-11-12T19:35:38,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/2cfadbedd6d34f1985b00d17a6c6fced is 50, key is test_row_0/C:col10/1731440135879/Put/seqid=0 2024-11-12T19:35:38,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742446_1622 (size=12301) 2024-11-12T19:35:38,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:38,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440198652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:38,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:38,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440198654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:38,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:38,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440198658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:38,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:38,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440198659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:38,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-12T19:35:38,930 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/2cfadbedd6d34f1985b00d17a6c6fced 2024-11-12T19:35:38,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/7868c95b9b964e09bbad2eaead237e9f as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/7868c95b9b964e09bbad2eaead237e9f 2024-11-12T19:35:38,935 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/7868c95b9b964e09bbad2eaead237e9f, entries=150, sequenceid=311, filesize=30.5 K 2024-11-12T19:35:38,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/d18164f5f0e0430e98ecd7073b4d368b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/d18164f5f0e0430e98ecd7073b4d368b 2024-11-12T19:35:38,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-12T19:35:38,938 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/d18164f5f0e0430e98ecd7073b4d368b, entries=150, sequenceid=311, filesize=12.0 K 2024-11-12T19:35:38,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/2cfadbedd6d34f1985b00d17a6c6fced as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/2cfadbedd6d34f1985b00d17a6c6fced 2024-11-12T19:35:38,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,940 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,942 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/2cfadbedd6d34f1985b00d17a6c6fced, entries=150, sequenceid=311, filesize=12.0 K 2024-11-12T19:35:38,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,943 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 9487c3b0150a5aa38a9544f87bbf2a6e in 1731ms, sequenceid=311, compaction requested=false 2024-11-12T19:35:38,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:38,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:38,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-12T19:35:38,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-12T19:35:38,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,945 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-12T19:35:38,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-12T19:35:38,945 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1900 sec 2024-11-12T19:35:38,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,946 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 2.1930 sec 2024-11-12T19:35:38,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:38,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:39,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
(last message repeated continuously by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 33067 from 2024-11-12T19:35:39,013 through 2024-11-12T19:35:39,066)
2024-11-12T19:35:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T19:35:39,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 9487c3b0150a5aa38a9544f87bbf2a6e
2024-11-12T19:35:39,167 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB
2024-11-12T19:35:39,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A
2024-11-12T19:35:39,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:35:39,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B
2024-11-12T19:35:39,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:35:39,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=C
2024-11-12T19:35:39,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-12T19:35:39,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112ac35f7f567b54132a5a29d8915a930dd_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440139167/Put/seqid=0
2024-11-12T19:35:39,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742447_1623 (size=14994)
2024-11-12T19:35:39,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:35:39,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:35:39,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440199194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493
2024-11-12T19:35:39,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440199195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493
2024-11-12T19:35:39,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:35:39,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440199198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493
2024-11-12T19:35:39,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:35:39,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440199201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493
2024-11-12T19:35:39,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-12T19:35:39,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440199303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493
2024-11-12T19:35:39,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:39,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440199303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:39,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:39,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440199306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:39,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:39,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440199306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:39,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:39,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440199506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:39,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:39,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440199506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:39,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:39,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440199509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:39,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:39,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440199510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:39,582 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:39,584 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112ac35f7f567b54132a5a29d8915a930dd_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112ac35f7f567b54132a5a29d8915a930dd_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:39,585 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/bf3b5c05b62748ba8eb31cac14ecf244, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:39,586 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/bf3b5c05b62748ba8eb31cac14ecf244 is 175, key is test_row_0/A:col10/1731440139167/Put/seqid=0 2024-11-12T19:35:39,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742448_1624 (size=39945) 2024-11-12T19:35:39,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:39,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440199808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:39,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:39,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440199810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:39,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:39,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440199812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:39,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:39,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440199812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:39,992 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=333, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/bf3b5c05b62748ba8eb31cac14ecf244 2024-11-12T19:35:39,997 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/c4647d20e5874a4a976b53e562c8d1ec is 50, key is test_row_0/B:col10/1731440139167/Put/seqid=0 2024-11-12T19:35:40,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742449_1625 (size=9857) 2024-11-12T19:35:40,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:40,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:40,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440200315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:40,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440200315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:40,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:40,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:40,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440200315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:40,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440200316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:40,401 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/c4647d20e5874a4a976b53e562c8d1ec 2024-11-12T19:35:40,406 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/6244c6dd95324008b6208661e0071c5c is 50, key is 
test_row_0/C:col10/1731440139167/Put/seqid=0 2024-11-12T19:35:40,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742450_1626 (size=9857) 2024-11-12T19:35:40,409 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/6244c6dd95324008b6208661e0071c5c 2024-11-12T19:35:40,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/bf3b5c05b62748ba8eb31cac14ecf244 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/bf3b5c05b62748ba8eb31cac14ecf244 2024-11-12T19:35:40,415 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/bf3b5c05b62748ba8eb31cac14ecf244, entries=200, sequenceid=333, filesize=39.0 K 2024-11-12T19:35:40,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/c4647d20e5874a4a976b53e562c8d1ec as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/c4647d20e5874a4a976b53e562c8d1ec 2024-11-12T19:35:40,424 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/c4647d20e5874a4a976b53e562c8d1ec, entries=100, sequenceid=333, filesize=9.6 K 2024-11-12T19:35:40,424 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/6244c6dd95324008b6208661e0071c5c as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/6244c6dd95324008b6208661e0071c5c 2024-11-12T19:35:40,427 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/6244c6dd95324008b6208661e0071c5c, entries=100, sequenceid=333, filesize=9.6 K 2024-11-12T19:35:40,428 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 9487c3b0150a5aa38a9544f87bbf2a6e in 1261ms, sequenceid=333, compaction requested=true 2024-11-12T19:35:40,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:40,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:A, 
priority=-2147483648, current under compaction store size is 1 2024-11-12T19:35:40,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:40,429 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:40,429 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:40,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:B, priority=-2147483648, current under compaction store size is 2 2024-11-12T19:35:40,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:40,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9487c3b0150a5aa38a9544f87bbf2a6e:C, priority=-2147483648, current under compaction store size is 3 2024-11-12T19:35:40,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:40,429 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:40,429 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 9487c3b0150a5aa38a9544f87bbf2a6e/B is initiating minor compaction (all files) 2024-11-12T19:35:40,429 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103103 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:40,429 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9487c3b0150a5aa38a9544f87bbf2a6e/B in TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:40,429 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1540): 9487c3b0150a5aa38a9544f87bbf2a6e/A is initiating minor compaction (all files) 2024-11-12T19:35:40,429 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9487c3b0150a5aa38a9544f87bbf2a6e/A in TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:40,430 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/7b32cd39fb9f498f942f9d18a01ef26e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/7868c95b9b964e09bbad2eaead237e9f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/bf3b5c05b62748ba8eb31cac14ecf244] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp, totalSize=100.7 K 2024-11-12T19:35:40,430 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/6ec7fbe0b22549b4afad04ec7c801d81, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/d18164f5f0e0430e98ecd7073b4d368b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/c4647d20e5874a4a976b53e562c8d1ec] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp, totalSize=34.3 K 2024-11-12T19:35:40,430 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:40,430 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
files: [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/7b32cd39fb9f498f942f9d18a01ef26e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/7868c95b9b964e09bbad2eaead237e9f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/bf3b5c05b62748ba8eb31cac14ecf244] 2024-11-12T19:35:40,430 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ec7fbe0b22549b4afad04ec7c801d81, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1731440135229 2024-11-12T19:35:40,430 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b32cd39fb9f498f942f9d18a01ef26e, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1731440135229 2024-11-12T19:35:40,430 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting d18164f5f0e0430e98ecd7073b4d368b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1731440135874 2024-11-12T19:35:40,430 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7868c95b9b964e09bbad2eaead237e9f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1731440135874 2024-11-12T19:35:40,430 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting c4647d20e5874a4a976b53e562c8d1ec, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1731440139166 2024-11-12T19:35:40,431 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf3b5c05b62748ba8eb31cac14ecf244, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1731440138040 2024-11-12T19:35:40,434 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:40,436 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241112fdb0d78c175f41bcb4f2844af9237496_9487c3b0150a5aa38a9544f87bbf2a6e store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:40,436 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9487c3b0150a5aa38a9544f87bbf2a6e#B#compaction#526 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:40,437 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/b3e066953b1e48cebc7c5917b41647e9 is 50, key is test_row_0/B:col10/1731440139167/Put/seqid=0 2024-11-12T19:35:40,438 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241112fdb0d78c175f41bcb4f2844af9237496_9487c3b0150a5aa38a9544f87bbf2a6e, store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:40,438 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112fdb0d78c175f41bcb4f2844af9237496_9487c3b0150a5aa38a9544f87bbf2a6e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:40,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742451_1627 (size=13051) 2024-11-12T19:35:40,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742452_1628 (size=4469) 2024-11-12T19:35:40,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:40,653 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-12T19:35:40,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A 2024-11-12T19:35:40,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:40,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B 2024-11-12T19:35:40,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:40,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=C 2024-11-12T19:35:40,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:40,672 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411125fb44fa1ca6d4bc884df3040753f49b7_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440140652/Put/seqid=0 2024-11-12T19:35:40,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742453_1629 (size=14994) 2024-11-12T19:35:40,704 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53426 deadline: 1731440200702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:40,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:40,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53426 deadline: 1731440200805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:40,848 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9487c3b0150a5aa38a9544f87bbf2a6e#A#compaction#525 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:40,849 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/398c3da064004487b614bbd3889de6c1 is 175, key is test_row_0/A:col10/1731440139167/Put/seqid=0 2024-11-12T19:35:40,852 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/b3e066953b1e48cebc7c5917b41647e9 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/b3e066953b1e48cebc7c5917b41647e9 2024-11-12T19:35:40,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742454_1630 (size=32112) 2024-11-12T19:35:40,855 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9487c3b0150a5aa38a9544f87bbf2a6e/B of 9487c3b0150a5aa38a9544f87bbf2a6e into b3e066953b1e48cebc7c5917b41647e9(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T19:35:40,855 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:40,855 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., storeName=9487c3b0150a5aa38a9544f87bbf2a6e/B, priority=13, startTime=1731440140429; duration=0sec 2024-11-12T19:35:40,855 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-12T19:35:40,855 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:B 2024-11-12T19:35:40,855 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T19:35:40,856 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T19:35:40,856 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1540): 9487c3b0150a5aa38a9544f87bbf2a6e/C is initiating minor compaction (all files) 2024-11-12T19:35:40,856 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9487c3b0150a5aa38a9544f87bbf2a6e/C in TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:40,856 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/bf63bc594b8443898d52464b5b221065, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/2cfadbedd6d34f1985b00d17a6c6fced, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/6244c6dd95324008b6208661e0071c5c] into tmpdir=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp, totalSize=34.3 K 2024-11-12T19:35:40,857 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting bf63bc594b8443898d52464b5b221065, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1731440135229 2024-11-12T19:35:40,857 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 2cfadbedd6d34f1985b00d17a6c6fced, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1731440135874 2024-11-12T19:35:40,858 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] compactions.Compactor(224): Compacting 6244c6dd95324008b6208661e0071c5c, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1731440139166 2024-11-12T19:35:40,864 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
9487c3b0150a5aa38a9544f87bbf2a6e#C#compaction#528 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T19:35:40,864 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/d0f0fe47cd544b36b21a83bc94a45bf1 is 50, key is test_row_0/C:col10/1731440139167/Put/seqid=0 2024-11-12T19:35:40,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-12T19:35:40,866 INFO [Thread-2409 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-12T19:35:40,867 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-12T19:35:40,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742455_1631 (size=13051) 2024-11-12T19:35:40,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-12T19:35:40,869 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-12T19:35:40,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-12T19:35:40,870 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T19:35:40,870 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T19:35:40,873 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/d0f0fe47cd544b36b21a83bc94a45bf1 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/d0f0fe47cd544b36b21a83bc94a45bf1 2024-11-12T19:35:40,877 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9487c3b0150a5aa38a9544f87bbf2a6e/C of 9487c3b0150a5aa38a9544f87bbf2a6e into d0f0fe47cd544b36b21a83bc94a45bf1(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
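The selection lines above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" and the ExploringCompactionPolicy permutation message) are governed by the standard store-file compaction knobs; the "16 blocking" figure matches the default hbase.hstore.blockingStoreFiles. The values in the sketch below are the usual HBase defaults, listed for orientation rather than read from this test's configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the compaction-selection settings implied by the log above (HBase defaults).
public class CompactionSelectionSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // minimum files for a minor compaction ("3 eligible")
    conf.setInt("hbase.hstore.compaction.max", 10);       // upper bound on files per compaction
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // the "in ratio" test used by ExploringCompactionPolicy
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" figure in the selection line
  }
}
```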
2024-11-12T19:35:40,877 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:40,877 INFO [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., storeName=9487c3b0150a5aa38a9544f87bbf2a6e/C, priority=13, startTime=1731440140429; duration=0sec 2024-11-12T19:35:40,877 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:40,877 DEBUG [RS:0;81d69e608036:33067-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:C 2024-11-12T19:35:40,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-12T19:35:40,991 DEBUG [Thread-2410 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1a5ecd59 to 127.0.0.1:60358 2024-11-12T19:35:40,991 DEBUG [Thread-2410 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:40,992 DEBUG [Thread-2414 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x560a8819 to 127.0.0.1:60358 2024-11-12T19:35:40,992 DEBUG [Thread-2414 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:40,992 DEBUG [Thread-2412 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61da8c1c to 127.0.0.1:60358 2024-11-12T19:35:40,992 DEBUG [Thread-2412 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:40,993 DEBUG [Thread-2416 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3df30e37 to 127.0.0.1:60358 2024-11-12T19:35:40,993 DEBUG [Thread-2416 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:40,994 DEBUG [Thread-2418 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x745bf218 to 127.0.0.1:60358 2024-11-12T19:35:40,994 DEBUG [Thread-2418 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:41,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:41,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53426 deadline: 1731440201010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:41,021 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:41,021 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-12T19:35:41,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:41,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:41,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:41,022 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
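The pid=165/166 entries trace a table flush requested over RPC ("Client=jenkins//172.17.0.3 flush TestAcidGuarantees"): the master stores a FlushTableProcedure, dispatches a FlushRegionProcedure to the region server, and the server declines with "NOT flushing ... as already flushing" because a memstore flush is already in progress, so the callable fails with IOException("Unable to complete flush") and the master re-dispatches until the in-flight flush finishes. On the client side this corresponds to an Admin flush call along the lines of the sketch below; connection setup is a placeholder and error handling is omitted.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hedged sketch of the client call behind procedure pid=165; the master/region-server
// retry loop shown in the log is handled entirely on the server side.
public class FlushSketch {
  public static void main(String[] args) throws java.io.IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Returns once the flush procedure completes (compare the earlier
      // "Operation: FLUSH ... procId: 163 completed" line).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```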
2024-11-12T19:35:41,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:41,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:41,079 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:41,085 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411125fb44fa1ca6d4bc884df3040753f49b7_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411125fb44fa1ca6d4bc884df3040753f49b7_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:41,086 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/01d6421ad47a48a08153c08c2673a3d5, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:41,087 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/01d6421ad47a48a08153c08c2673a3d5 is 175, key is test_row_0/A:col10/1731440140652/Put/seqid=0 2024-11-12T19:35:41,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742456_1632 (size=39949) 2024-11-12T19:35:41,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-12T19:35:41,174 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:41,174 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-12T19:35:41,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:41,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:41,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:41,174 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:41,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:41,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:41,261 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/398c3da064004487b614bbd3889de6c1 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/398c3da064004487b614bbd3889de6c1 2024-11-12T19:35:41,267 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9487c3b0150a5aa38a9544f87bbf2a6e/A of 9487c3b0150a5aa38a9544f87bbf2a6e into 398c3da064004487b614bbd3889de6c1(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T19:35:41,267 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:41,267 INFO [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e., storeName=9487c3b0150a5aa38a9544f87bbf2a6e/A, priority=13, startTime=1731440140428; duration=0sec 2024-11-12T19:35:41,267 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T19:35:41,267 DEBUG [RS:0;81d69e608036:33067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9487c3b0150a5aa38a9544f87bbf2a6e:A 2024-11-12T19:35:41,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:41,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53426 deadline: 1731440201314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:41,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:41,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53400 deadline: 1731440201320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:41,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:41,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53450 deadline: 1731440201322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:41,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:41,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53442 deadline: 1731440201324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:41,326 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:41,327 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-12T19:35:41,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:41,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:41,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:41,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:41,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53446 deadline: 1731440201327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:41,328 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:41,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:41,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:41,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-12T19:35:41,481 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:41,481 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-12T19:35:41,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:41,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:41,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:41,481 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:41,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:35:41,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T19:35:41,491 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=351, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/01d6421ad47a48a08153c08c2673a3d5 2024-11-12T19:35:41,496 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/c3b094cdf6cb4ec39407d81709ef8d41 is 50, key is test_row_0/B:col10/1731440140652/Put/seqid=0 2024-11-12T19:35:41,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742457_1633 (size=12301) 2024-11-12T19:35:41,633 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:41,634 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-12T19:35:41,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:41,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:41,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:41,635 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
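The DefaultMobStoreFlusher and HMobStore entries above (family A flushed through the mob path, with the flushed file renamed under mobdir/) indicate that column family A of this table is MOB-enabled. The sketch below shows how such a family is typically declared; the threshold value and the plain B/C families are illustrative assumptions, not read from the test source.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of a MOB-enabled "A" family like the one flushed via DefaultMobStoreFlusher above.
public class MobTableSketch {
  static void createMobTable(Admin admin) throws java.io.IOException {
    admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)   // route flushes/compactions for this family through the mob store
            .setMobThreshold(10L)  // cells larger than this many bytes are written as MOBs (illustrative)
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
        .build());
  }
}
```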
2024-11-12T19:35:41,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:41,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:41,789 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:41,789 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-12T19:35:41,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:41,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:41,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:41,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:41,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:41,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:41,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-12T19:35:41,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:53426 deadline: 1731440201816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 2024-11-12T19:35:41,899 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/c3b094cdf6cb4ec39407d81709ef8d41 2024-11-12T19:35:41,904 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/1769c542caaa4eac91b564b47cc3dd49 is 50, key is test_row_0/C:col10/1731440140652/Put/seqid=0 2024-11-12T19:35:41,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742458_1634 (size=12301) 2024-11-12T19:35:41,941 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:41,941 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-12T19:35:41,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:41,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:41,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:41,942 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:41,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:41,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:41,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-12T19:35:42,094 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:42,094 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-12T19:35:42,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:42,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:42,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:42,095 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:42,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:42,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:42,248 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:42,249 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-12T19:35:42,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:42,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. as already flushing 2024-11-12T19:35:42,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:42,249 ERROR [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:42,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:42,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:42,309 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/1769c542caaa4eac91b564b47cc3dd49 2024-11-12T19:35:42,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/01d6421ad47a48a08153c08c2673a3d5 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/01d6421ad47a48a08153c08c2673a3d5 2024-11-12T19:35:42,314 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/01d6421ad47a48a08153c08c2673a3d5, entries=200, sequenceid=351, filesize=39.0 K 2024-11-12T19:35:42,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/c3b094cdf6cb4ec39407d81709ef8d41 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/c3b094cdf6cb4ec39407d81709ef8d41 2024-11-12T19:35:42,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/c3b094cdf6cb4ec39407d81709ef8d41, entries=150, 
sequenceid=351, filesize=12.0 K 2024-11-12T19:35:42,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/1769c542caaa4eac91b564b47cc3dd49 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/1769c542caaa4eac91b564b47cc3dd49 2024-11-12T19:35:42,320 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/1769c542caaa4eac91b564b47cc3dd49, entries=150, sequenceid=351, filesize=12.0 K 2024-11-12T19:35:42,321 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 9487c3b0150a5aa38a9544f87bbf2a6e in 1668ms, sequenceid=351, compaction requested=false 2024-11-12T19:35:42,321 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:42,403 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:42,404 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33067 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-12T19:35:42,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:42,404 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-12T19:35:42,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A 2024-11-12T19:35:42,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:42,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B 2024-11-12T19:35:42,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:42,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=C 2024-11-12T19:35:42,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:42,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112d3d1aaad67d9466aabd2af421a5a5b52_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440140692/Put/seqid=0 2024-11-12T19:35:42,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742459_1635 (size=12454) 2024-11-12T19:35:42,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:42,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33067 {}] regionserver.HRegion(8581): Flush requested on 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:42,824 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
as already flushing 2024-11-12T19:35:42,824 DEBUG [Thread-2407 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x271e8143 to 127.0.0.1:60358 2024-11-12T19:35:42,824 DEBUG [Thread-2407 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:42,825 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241112d3d1aaad67d9466aabd2af421a5a5b52_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112d3d1aaad67d9466aabd2af421a5a5b52_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:42,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/d051d7daa6424cf19366e9057e5bc437, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:42,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/d051d7daa6424cf19366e9057e5bc437 is 175, key is test_row_0/A:col10/1731440140692/Put/seqid=0 2024-11-12T19:35:42,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742460_1636 (size=31255) 2024-11-12T19:35:42,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-12T19:35:43,234 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=372, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/d051d7daa6424cf19366e9057e5bc437 2024-11-12T19:35:43,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/40ac7729ec95418ea8786437f1faef3b is 50, key is test_row_0/B:col10/1731440140692/Put/seqid=0 2024-11-12T19:35:43,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742461_1637 (size=12301) 2024-11-12T19:35:43,327 DEBUG [Thread-2399 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x55544bc7 to 127.0.0.1:60358 2024-11-12T19:35:43,327 DEBUG [Thread-2399 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:43,333 DEBUG [Thread-2403 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d0a9e33 to 127.0.0.1:60358 2024-11-12T19:35:43,333 DEBUG 
[Thread-2403 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:43,334 DEBUG [Thread-2401 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1208728f to 127.0.0.1:60358 2024-11-12T19:35:43,334 DEBUG [Thread-2401 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:43,349 DEBUG [Thread-2405 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x40e55f2a to 127.0.0.1:60358 2024-11-12T19:35:43,349 DEBUG [Thread-2405 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:43,648 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/40ac7729ec95418ea8786437f1faef3b 2024-11-12T19:35:43,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/84cd14d9043b4ca38ac21aa617c32754 is 50, key is test_row_0/C:col10/1731440140692/Put/seqid=0 2024-11-12T19:35:43,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742462_1638 (size=12301) 2024-11-12T19:35:44,059 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/84cd14d9043b4ca38ac21aa617c32754 2024-11-12T19:35:44,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/d051d7daa6424cf19366e9057e5bc437 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/d051d7daa6424cf19366e9057e5bc437 2024-11-12T19:35:44,069 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/d051d7daa6424cf19366e9057e5bc437, entries=150, sequenceid=372, filesize=30.5 K 2024-11-12T19:35:44,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/40ac7729ec95418ea8786437f1faef3b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/40ac7729ec95418ea8786437f1faef3b 2024-11-12T19:35:44,073 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 
{event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/40ac7729ec95418ea8786437f1faef3b, entries=150, sequenceid=372, filesize=12.0 K 2024-11-12T19:35:44,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/84cd14d9043b4ca38ac21aa617c32754 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/84cd14d9043b4ca38ac21aa617c32754 2024-11-12T19:35:44,077 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/84cd14d9043b4ca38ac21aa617c32754, entries=150, sequenceid=372, filesize=12.0 K 2024-11-12T19:35:44,078 INFO [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=33.54 KB/34350 for 9487c3b0150a5aa38a9544f87bbf2a6e in 1673ms, sequenceid=372, compaction requested=true 2024-11-12T19:35:44,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:44,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:44,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/81d69e608036:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-12T19:35:44,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-12T19:35:44,080 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-12T19:35:44,080 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.2090 sec 2024-11-12T19:35:44,081 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 3.2130 sec 2024-11-12T19:35:44,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-12T19:35:44,975 INFO [Thread-2409 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-12T19:35:44,975 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-12T19:35:44,975 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 73 2024-11-12T19:35:44,975 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56 2024-11-12T19:35:44,975 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 65 2024-11-12T19:35:44,975 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 73 2024-11-12T19:35:44,975 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 31 2024-11-12T19:35:44,975 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-12T19:35:44,975 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4655 2024-11-12T19:35:44,975 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4674 2024-11-12T19:35:44,975 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4572 2024-11-12T19:35:44,975 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4650 2024-11-12T19:35:44,975 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4680 2024-11-12T19:35:44,975 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-12T19:35:44,975 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-12T19:35:44,975 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a1fe6e4 to 127.0.0.1:60358 2024-11-12T19:35:44,975 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:44,976 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-12T19:35:44,976 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees 2024-11-12T19:35:44,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-12T19:35:44,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-12T19:35:44,979 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731440144978"}]},"ts":"1731440144978"} 2024-11-12T19:35:44,980 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-12T19:35:45,029 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-12T19:35:45,030 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-12T19:35:45,033 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9487c3b0150a5aa38a9544f87bbf2a6e, UNASSIGN}] 2024-11-12T19:35:45,035 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=168, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9487c3b0150a5aa38a9544f87bbf2a6e, UNASSIGN 2024-11-12T19:35:45,036 INFO [PEWorker-5 
{}] assignment.RegionStateStore(202): pid=169 updating hbase:meta row=9487c3b0150a5aa38a9544f87bbf2a6e, regionState=CLOSING, regionLocation=81d69e608036,33067,1731439956493 2024-11-12T19:35:45,038 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-12T19:35:45,038 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; CloseRegionProcedure 9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493}] 2024-11-12T19:35:45,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-12T19:35:45,190 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 81d69e608036,33067,1731439956493 2024-11-12T19:35:45,191 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(124): Close 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:45,191 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-12T19:35:45,191 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1681): Closing 9487c3b0150a5aa38a9544f87bbf2a6e, disabling compactions & flushes 2024-11-12T19:35:45,191 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:45,191 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 2024-11-12T19:35:45,191 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. after waiting 0 ms 2024-11-12T19:35:45,191 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
2024-11-12T19:35:45,192 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(2837): Flushing 9487c3b0150a5aa38a9544f87bbf2a6e 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-12T19:35:45,192 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=A 2024-11-12T19:35:45,192 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:45,192 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=B 2024-11-12T19:35:45,192 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:45,192 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9487c3b0150a5aa38a9544f87bbf2a6e, store=C 2024-11-12T19:35:45,192 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-12T19:35:45,201 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111285fe67894f224302a34b6c10d6420d1c_9487c3b0150a5aa38a9544f87bbf2a6e is 50, key is test_row_0/A:col10/1731440143347/Put/seqid=0 2024-11-12T19:35:45,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742463_1639 (size=12454) 2024-11-12T19:35:45,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-12T19:35:45,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-12T19:35:45,605 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T19:35:45,607 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111285fe67894f224302a34b6c10d6420d1c_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111285fe67894f224302a34b6c10d6420d1c_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:45,608 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/0ffb28958fd346f499fd5c5321e6860b, store: [table=TestAcidGuarantees family=A region=9487c3b0150a5aa38a9544f87bbf2a6e] 2024-11-12T19:35:45,609 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/0ffb28958fd346f499fd5c5321e6860b is 175, key is test_row_0/A:col10/1731440143347/Put/seqid=0 2024-11-12T19:35:45,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742464_1640 (size=31255) 2024-11-12T19:35:45,612 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=380, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/0ffb28958fd346f499fd5c5321e6860b 2024-11-12T19:35:45,616 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/ee3dbc9a488d43bbb4c3ec645a0cf3bb is 50, key is test_row_0/B:col10/1731440143347/Put/seqid=0 2024-11-12T19:35:45,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742465_1641 (size=12301) 2024-11-12T19:35:46,020 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/ee3dbc9a488d43bbb4c3ec645a0cf3bb 2024-11-12T19:35:46,029 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/2b9ec9ce38d44ab98cbba56a30feb452 is 50, key is test_row_0/C:col10/1731440143347/Put/seqid=0 2024-11-12T19:35:46,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742466_1642 (size=12301) 2024-11-12T19:35:46,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-12T19:35:46,434 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/2b9ec9ce38d44ab98cbba56a30feb452 2024-11-12T19:35:46,440 DEBUG 
[RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/A/0ffb28958fd346f499fd5c5321e6860b as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/0ffb28958fd346f499fd5c5321e6860b 2024-11-12T19:35:46,442 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/0ffb28958fd346f499fd5c5321e6860b, entries=150, sequenceid=380, filesize=30.5 K 2024-11-12T19:35:46,443 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/B/ee3dbc9a488d43bbb4c3ec645a0cf3bb as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/ee3dbc9a488d43bbb4c3ec645a0cf3bb 2024-11-12T19:35:46,445 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/ee3dbc9a488d43bbb4c3ec645a0cf3bb, entries=150, sequenceid=380, filesize=12.0 K 2024-11-12T19:35:46,446 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/.tmp/C/2b9ec9ce38d44ab98cbba56a30feb452 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/2b9ec9ce38d44ab98cbba56a30feb452 2024-11-12T19:35:46,449 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/2b9ec9ce38d44ab98cbba56a30feb452, entries=150, sequenceid=380, filesize=12.0 K 2024-11-12T19:35:46,449 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 9487c3b0150a5aa38a9544f87bbf2a6e in 1258ms, sequenceid=380, compaction requested=true 2024-11-12T19:35:46,450 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/9471ba77acc445b89696cf1e5c09a270, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/ec651701017f481d9e111f881469034a, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/c17b2372a43c401a9872b1847080368b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/892824744e8d440a9cc8d2c1c06e6305, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/f85c9f16669744a78f6839a3d9a7265b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/dbbacadaa1d94efe95dd2443cd053847, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/07fbd840539149168f6dc04de7a42fbb, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/a5c43ec287844250bdfefa1b06557355, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/947004ae23044b70849162efb2f74aad, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/6aee2d548687410baf624733dce76c52, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/472174221a884593bb512ed834c8aa3b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/b6531ae6d0c14ee1a38c14b1b774f5d7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/8fa3a12dae184876a6ce6272759d3be4, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/34d108e4bbf34a4ebe881e44b15828c8, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/132059ce740f4c4ca4d08f69a5ac5526, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/5e78cb6fc0c2469d9330515e413b5d6e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/9dad5d6f1f3d4173a78178296ee371c0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/5e5a2276abc3411898ae7e78a2b7f172, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/acef769068194dc6854ffb8685cd81a6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/7b32cd39fb9f498f942f9d18a01ef26e, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/7868c95b9b964e09bbad2eaead237e9f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/bf3b5c05b62748ba8eb31cac14ecf244] to archive 2024-11-12T19:35:46,451 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-12T19:35:46,452 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/9471ba77acc445b89696cf1e5c09a270 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/9471ba77acc445b89696cf1e5c09a270 2024-11-12T19:35:46,453 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/ec651701017f481d9e111f881469034a to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/ec651701017f481d9e111f881469034a 2024-11-12T19:35:46,454 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/c17b2372a43c401a9872b1847080368b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/c17b2372a43c401a9872b1847080368b 2024-11-12T19:35:46,454 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/892824744e8d440a9cc8d2c1c06e6305 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/892824744e8d440a9cc8d2c1c06e6305 2024-11-12T19:35:46,455 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/f85c9f16669744a78f6839a3d9a7265b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/f85c9f16669744a78f6839a3d9a7265b 2024-11-12T19:35:46,456 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/dbbacadaa1d94efe95dd2443cd053847 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/dbbacadaa1d94efe95dd2443cd053847 2024-11-12T19:35:46,457 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/07fbd840539149168f6dc04de7a42fbb to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/07fbd840539149168f6dc04de7a42fbb 2024-11-12T19:35:46,458 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/a5c43ec287844250bdfefa1b06557355 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/a5c43ec287844250bdfefa1b06557355 2024-11-12T19:35:46,458 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/947004ae23044b70849162efb2f74aad to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/947004ae23044b70849162efb2f74aad 2024-11-12T19:35:46,459 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/6aee2d548687410baf624733dce76c52 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/6aee2d548687410baf624733dce76c52 2024-11-12T19:35:46,460 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/472174221a884593bb512ed834c8aa3b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/472174221a884593bb512ed834c8aa3b 2024-11-12T19:35:46,461 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/b6531ae6d0c14ee1a38c14b1b774f5d7 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/b6531ae6d0c14ee1a38c14b1b774f5d7 2024-11-12T19:35:46,462 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/8fa3a12dae184876a6ce6272759d3be4 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/8fa3a12dae184876a6ce6272759d3be4 2024-11-12T19:35:46,463 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/34d108e4bbf34a4ebe881e44b15828c8 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/34d108e4bbf34a4ebe881e44b15828c8 2024-11-12T19:35:46,464 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/132059ce740f4c4ca4d08f69a5ac5526 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/132059ce740f4c4ca4d08f69a5ac5526 2024-11-12T19:35:46,464 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/5e78cb6fc0c2469d9330515e413b5d6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/5e78cb6fc0c2469d9330515e413b5d6e 2024-11-12T19:35:46,465 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/9dad5d6f1f3d4173a78178296ee371c0 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/9dad5d6f1f3d4173a78178296ee371c0 2024-11-12T19:35:46,466 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/5e5a2276abc3411898ae7e78a2b7f172 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/5e5a2276abc3411898ae7e78a2b7f172 2024-11-12T19:35:46,466 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/acef769068194dc6854ffb8685cd81a6 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/acef769068194dc6854ffb8685cd81a6 2024-11-12T19:35:46,467 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/7b32cd39fb9f498f942f9d18a01ef26e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/7b32cd39fb9f498f942f9d18a01ef26e 2024-11-12T19:35:46,468 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/7868c95b9b964e09bbad2eaead237e9f to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/7868c95b9b964e09bbad2eaead237e9f 2024-11-12T19:35:46,469 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/bf3b5c05b62748ba8eb31cac14ecf244 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/bf3b5c05b62748ba8eb31cac14ecf244 2024-11-12T19:35:46,470 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/6e9feda1b65a4615bd74500f4638d17b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/d9e0c212781448f0aea8fe234b648dfb, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/faec61cdf01f47cfa6556329d070d782, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/af88a9399e304b838574fbc92b6b9c31, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/9bc037b94fd146dd8fdf4e3508c3d055, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/7eef6f09ce6e418e8cb581a72aa001c7, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/47afa6a2c32647a5a2eb8d6ae84c5972, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/7440a0c4733a4c908dd7df6667924941, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/fa9389f7041c4615a935e9b3df40e9b5, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/f950bb139d73443eb488c2d241bb5520, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/2da858a8fef34bb6bbe172d65dd07a1e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/901c536d8efa424bb57bba90119709ce, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/38e61aabdd2543b793901ef13cc3b5a7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/484b5735902c4f7d9419000fbc627b46, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/51af0b237f0d4d198bc823ec6c935642, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/27c3e80795de41f0b451fa7db8214e8c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/664f74bb44a4470f957dd8b533c8cdae, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/35926df7e443488d8ea8da900b8ef04c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/6ec7fbe0b22549b4afad04ec7c801d81, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/74d19d0c413a446987faab342137adf9, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/d18164f5f0e0430e98ecd7073b4d368b, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/c4647d20e5874a4a976b53e562c8d1ec] to archive 2024-11-12T19:35:46,471 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
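The StoreCloser DEBUG lines above record the archival pattern used while the region closes: every compacted store file under data/default/TestAcidGuarantees/<region>/<family>/ is moved to the mirrored location under archive/, and the move is a rename inside the same HDFS namespace rather than a copy. A minimal sketch of that pattern, using only the public Hadoop FileSystem API against the NameNode address shown in the log; the class name, the args[0] parameter and the string-based path rewrite are illustrative assumptions, not HBase's internal HFileArchiver code:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveMoveSketch {
        public static void main(String[] args) throws Exception {
            // args[0]: absolute path of a store file under .../data/default/<table>/<region>/<family>/
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41367"), conf);

            Path src = new Path(args[0]);
            // The archive tree mirrors the data tree: data/default/... -> archive/data/default/...
            Path dst = new Path(src.toString().replaceFirst("/data/default/", "/archive/data/default/"));

            fs.mkdirs(dst.getParent());           // ensure archive/<table>/<region>/<family> exists
            boolean moved = fs.rename(src, dst);  // metadata-only move within one namespace
            System.out.println("archived " + src + " -> " + dst + " : " + moved);
        }
    }

Because the move is a rename, relocating the long list of compacted files for family A (and, below, B and C) is cheap regardless of file sizes; the archive directory is cleaned up separately later instead of the files being deleted in place.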
2024-11-12T19:35:46,472 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/6e9feda1b65a4615bd74500f4638d17b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/6e9feda1b65a4615bd74500f4638d17b 2024-11-12T19:35:46,473 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/d9e0c212781448f0aea8fe234b648dfb to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/d9e0c212781448f0aea8fe234b648dfb 2024-11-12T19:35:46,474 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/faec61cdf01f47cfa6556329d070d782 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/faec61cdf01f47cfa6556329d070d782 2024-11-12T19:35:46,475 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/af88a9399e304b838574fbc92b6b9c31 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/af88a9399e304b838574fbc92b6b9c31 2024-11-12T19:35:46,476 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/9bc037b94fd146dd8fdf4e3508c3d055 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/9bc037b94fd146dd8fdf4e3508c3d055 2024-11-12T19:35:46,476 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/7eef6f09ce6e418e8cb581a72aa001c7 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/7eef6f09ce6e418e8cb581a72aa001c7 2024-11-12T19:35:46,477 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/47afa6a2c32647a5a2eb8d6ae84c5972 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/47afa6a2c32647a5a2eb8d6ae84c5972 2024-11-12T19:35:46,478 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/7440a0c4733a4c908dd7df6667924941 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/7440a0c4733a4c908dd7df6667924941 2024-11-12T19:35:46,479 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/fa9389f7041c4615a935e9b3df40e9b5 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/fa9389f7041c4615a935e9b3df40e9b5 2024-11-12T19:35:46,480 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/f950bb139d73443eb488c2d241bb5520 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/f950bb139d73443eb488c2d241bb5520 2024-11-12T19:35:46,480 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/2da858a8fef34bb6bbe172d65dd07a1e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/2da858a8fef34bb6bbe172d65dd07a1e 2024-11-12T19:35:46,481 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/901c536d8efa424bb57bba90119709ce to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/901c536d8efa424bb57bba90119709ce 2024-11-12T19:35:46,482 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/38e61aabdd2543b793901ef13cc3b5a7 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/38e61aabdd2543b793901ef13cc3b5a7 2024-11-12T19:35:46,483 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/484b5735902c4f7d9419000fbc627b46 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/484b5735902c4f7d9419000fbc627b46 2024-11-12T19:35:46,484 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/51af0b237f0d4d198bc823ec6c935642 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/51af0b237f0d4d198bc823ec6c935642 2024-11-12T19:35:46,484 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/27c3e80795de41f0b451fa7db8214e8c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/27c3e80795de41f0b451fa7db8214e8c 2024-11-12T19:35:46,490 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/664f74bb44a4470f957dd8b533c8cdae to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/664f74bb44a4470f957dd8b533c8cdae 2024-11-12T19:35:46,491 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/35926df7e443488d8ea8da900b8ef04c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/35926df7e443488d8ea8da900b8ef04c 2024-11-12T19:35:46,492 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/6ec7fbe0b22549b4afad04ec7c801d81 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/6ec7fbe0b22549b4afad04ec7c801d81 2024-11-12T19:35:46,493 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/74d19d0c413a446987faab342137adf9 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/74d19d0c413a446987faab342137adf9 2024-11-12T19:35:46,494 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/d18164f5f0e0430e98ecd7073b4d368b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/d18164f5f0e0430e98ecd7073b4d368b 2024-11-12T19:35:46,495 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/c4647d20e5874a4a976b53e562c8d1ec to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/c4647d20e5874a4a976b53e562c8d1ec 2024-11-12T19:35:46,496 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/27ec76de0c95468b93feb04d71d452e0, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/3d5bcce152f1415c899f03de213eda46, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/94e7f54bf41440f98778fc6ec0f1fa6e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/1289fd5d27ef4737a94936383ee04a57, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/4ad0681f47234a2bbc24f47b7e18914e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f50243a15ac74fba994ff3ed9da0c699, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/0ff43cf6b7764de687c53bce9406b356, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/1b1d8b3cec1c468d8346d50c3d2ca1f6, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f758ff5326c74cfd850a852526ec450b, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f597621b57634d52b1933ed984b95d33, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/98fbd153d3554c6bbafa362ccd91b9c7, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/d8cf3e869e4a4907963db1aa9b36d82e, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/aeb5a51d5a6244519adf77280ea85fe1, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/cf40548692584de1aac3b03393d8ea5c, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/989458e246c14a3d85fa52471640a358, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/dd2506c1ff1d401fa7729b075134384f, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/e03b9aa7e025433189df61d13f183f76, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/5b5923dc058f49a487ca933052ee7383, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/bf63bc594b8443898d52464b5b221065, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f7eefab1b1f840e69c098c514d34a664, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/2cfadbedd6d34f1985b00d17a6c6fced, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/6244c6dd95324008b6208661e0071c5c] to archive 2024-11-12T19:35:46,496 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
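The same per-family pattern repeats for B and C. To see its effect, a family directory under data/ and its mirror under archive/ can be listed with the ordinary FileSystem API; a small sketch under the same assumptions (the class name is made up, and the directory arguments are placeholders for paths like the ones in the log):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListFamilyDirs {
        public static void main(String[] args) throws Exception {
            // args: one or more directories, e.g. .../data/default/TestAcidGuarantees/<region>/C
            //       and .../archive/data/default/TestAcidGuarantees/<region>/C
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41367"), conf);
            for (String dir : args) {
                Path p = new Path(dir);
                if (!fs.exists(p)) {                  // a fully archived family dir may already be gone
                    System.out.println(dir + ": (missing)");
                    continue;
                }
                System.out.println(dir + ":");
                for (FileStatus f : fs.listStatus(p)) {
                    System.out.printf("  %s (%d bytes)%n", f.getPath().getName(), f.getLen());
                }
            }
        }
    }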
2024-11-12T19:35:46,498 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/27ec76de0c95468b93feb04d71d452e0 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/27ec76de0c95468b93feb04d71d452e0 2024-11-12T19:35:46,499 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/3d5bcce152f1415c899f03de213eda46 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/3d5bcce152f1415c899f03de213eda46 2024-11-12T19:35:46,499 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/94e7f54bf41440f98778fc6ec0f1fa6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/94e7f54bf41440f98778fc6ec0f1fa6e 2024-11-12T19:35:46,500 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/1289fd5d27ef4737a94936383ee04a57 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/1289fd5d27ef4737a94936383ee04a57 2024-11-12T19:35:46,501 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/4ad0681f47234a2bbc24f47b7e18914e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/4ad0681f47234a2bbc24f47b7e18914e 2024-11-12T19:35:46,502 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f50243a15ac74fba994ff3ed9da0c699 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f50243a15ac74fba994ff3ed9da0c699 2024-11-12T19:35:46,503 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/0ff43cf6b7764de687c53bce9406b356 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/0ff43cf6b7764de687c53bce9406b356 2024-11-12T19:35:46,503 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/1b1d8b3cec1c468d8346d50c3d2ca1f6 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/1b1d8b3cec1c468d8346d50c3d2ca1f6 2024-11-12T19:35:46,504 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f758ff5326c74cfd850a852526ec450b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f758ff5326c74cfd850a852526ec450b 2024-11-12T19:35:46,505 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f597621b57634d52b1933ed984b95d33 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f597621b57634d52b1933ed984b95d33 2024-11-12T19:35:46,506 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/98fbd153d3554c6bbafa362ccd91b9c7 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/98fbd153d3554c6bbafa362ccd91b9c7 2024-11-12T19:35:46,507 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/d8cf3e869e4a4907963db1aa9b36d82e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/d8cf3e869e4a4907963db1aa9b36d82e 2024-11-12T19:35:46,508 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/aeb5a51d5a6244519adf77280ea85fe1 to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/aeb5a51d5a6244519adf77280ea85fe1 2024-11-12T19:35:46,508 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/cf40548692584de1aac3b03393d8ea5c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/cf40548692584de1aac3b03393d8ea5c 2024-11-12T19:35:46,509 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/989458e246c14a3d85fa52471640a358 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/989458e246c14a3d85fa52471640a358 2024-11-12T19:35:46,510 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/dd2506c1ff1d401fa7729b075134384f to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/dd2506c1ff1d401fa7729b075134384f 2024-11-12T19:35:46,512 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/e03b9aa7e025433189df61d13f183f76 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/e03b9aa7e025433189df61d13f183f76 2024-11-12T19:35:46,513 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/5b5923dc058f49a487ca933052ee7383 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/5b5923dc058f49a487ca933052ee7383 2024-11-12T19:35:46,513 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/bf63bc594b8443898d52464b5b221065 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/bf63bc594b8443898d52464b5b221065 2024-11-12T19:35:46,514 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f7eefab1b1f840e69c098c514d34a664 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/f7eefab1b1f840e69c098c514d34a664 2024-11-12T19:35:46,515 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/2cfadbedd6d34f1985b00d17a6c6fced to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/2cfadbedd6d34f1985b00d17a6c6fced 2024-11-12T19:35:46,515 ERROR [LeaseRenewer:jenkins.hfs.0@localhost:41367 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins.hfs.0@localhost:41367,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T19:35:46,515 DEBUG [StoreCloser-TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/6244c6dd95324008b6208661e0071c5c to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/6244c6dd95324008b6208661e0071c5c 2024-11-12T19:35:46,518 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/recovered.edits/383.seqid, newMaxSeqId=383, maxSeqId=4 2024-11-12T19:35:46,519 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e. 
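One entry in the middle of the archiver output above is not routine: the ERROR logged through server.NIOServerCnxnFactory reports that the HDFS LeaseRenewer thread for jenkins.hfs.0@localhost:41367 died with a NullPointerException, evidently because it hit a null output stream while enumerating the streams whose leases it renews, most likely a race with those streams being closed during the region shutdown. The quoted "Cannot invoke ... because "outputStream" is null" text is the JDK's helpful NullPointerException message. The way such a thread death surfaces in a log can be reproduced with plain Java and no Hadoop or ZooKeeper classes; everything in this sketch (class, field and thread names) is illustrative:

    public class ThreadDeathLoggingSketch {
        static Object outputStream;  // stands in for the stream that was already closed

        public static void main(String[] args) throws Exception {
            // Analogous to the ERROR above: a default uncaught-exception handler reports a
            // background thread that died from an unchecked exception.
            Thread.setDefaultUncaughtExceptionHandler((t, e) ->
                    System.err.println("Thread " + t + " died: " + e));

            Thread renewer = new Thread(() -> {
                // Throws a helpful NPE naming "outputStream", like the one in the log.
                outputStream.toString();
            }, "LeaseRenewer-sketch");
            renewer.start();
            renewer.join();
        }
    }

Only the renewer thread is lost; the close itself keeps going, as the entries right after the ERROR show: the last C file is archived, recovered.edits/383.seqid is written, and the region is reported closed.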
2024-11-12T19:35:46,519 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1635): Region close journal for 9487c3b0150a5aa38a9544f87bbf2a6e: 2024-11-12T19:35:46,520 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(170): Closed 9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:46,520 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=169 updating hbase:meta row=9487c3b0150a5aa38a9544f87bbf2a6e, regionState=CLOSED 2024-11-12T19:35:46,522 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-12T19:35:46,522 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; CloseRegionProcedure 9487c3b0150a5aa38a9544f87bbf2a6e, server=81d69e608036,33067,1731439956493 in 1.4830 sec 2024-11-12T19:35:46,523 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=169, resume processing ppid=168 2024-11-12T19:35:46,523 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=168, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9487c3b0150a5aa38a9544f87bbf2a6e, UNASSIGN in 1.4890 sec 2024-11-12T19:35:46,524 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-12T19:35:46,524 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4930 sec 2024-11-12T19:35:46,525 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731440146524"}]},"ts":"1731440146524"} 2024-11-12T19:35:46,525 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-12T19:35:46,561 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-12T19:35:46,563 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5860 sec 2024-11-12T19:35:47,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-12T19:35:47,085 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-12T19:35:47,086 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-11-12T19:35:47,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:35:47,089 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=171, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:35:47,091 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=171, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:35:47,091 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-12T19:35:47,095 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,100 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A, FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B, FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C, FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/recovered.edits] 2024-11-12T19:35:47,104 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/01d6421ad47a48a08153c08c2673a3d5 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/01d6421ad47a48a08153c08c2673a3d5 2024-11-12T19:35:47,106 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/0ffb28958fd346f499fd5c5321e6860b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/0ffb28958fd346f499fd5c5321e6860b 2024-11-12T19:35:47,108 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/398c3da064004487b614bbd3889de6c1 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/398c3da064004487b614bbd3889de6c1 2024-11-12T19:35:47,109 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/d051d7daa6424cf19366e9057e5bc437 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/A/d051d7daa6424cf19366e9057e5bc437 2024-11-12T19:35:47,111 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/40ac7729ec95418ea8786437f1faef3b to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/40ac7729ec95418ea8786437f1faef3b 
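The rest of the section is the client-driven teardown: procedure 167 (DisableTableProcedure) finishes, the HBaseAdmin future reports the DISABLE operation complete, and the same RPC handler then stores pid=171, a DeleteTableProcedure whose DELETE_TABLE_CLEAR_FS_LAYOUT step drives the HFileArchiver calls around it. From the client side this is just two synchronous Admin calls; a minimal sketch, assuming an hbase-site.xml on the classpath that points at this mini-cluster (the class name and the enabled-check are additions for the example):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                if (admin.isTableEnabled(table)) {
                    admin.disableTable(table);   // master runs a DisableTableProcedure (pid=167 above)
                }
                admin.deleteTable(table);        // master runs a DeleteTableProcedure (pid=171 above),
                                                 // which archives the region and mob directories
            }
        }
    }

Both calls block until the corresponding master procedure succeeds, which is why the client-side "Operation: DISABLE ... completed" line only appears after pid=167 reaches SUCCESS.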
2024-11-12T19:35:47,112 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/b3e066953b1e48cebc7c5917b41647e9 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/b3e066953b1e48cebc7c5917b41647e9 2024-11-12T19:35:47,114 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/c3b094cdf6cb4ec39407d81709ef8d41 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/c3b094cdf6cb4ec39407d81709ef8d41 2024-11-12T19:35:47,115 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/ee3dbc9a488d43bbb4c3ec645a0cf3bb to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/B/ee3dbc9a488d43bbb4c3ec645a0cf3bb 2024-11-12T19:35:47,117 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/1769c542caaa4eac91b564b47cc3dd49 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/1769c542caaa4eac91b564b47cc3dd49 2024-11-12T19:35:47,118 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/2b9ec9ce38d44ab98cbba56a30feb452 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/2b9ec9ce38d44ab98cbba56a30feb452 2024-11-12T19:35:47,119 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/84cd14d9043b4ca38ac21aa617c32754 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/84cd14d9043b4ca38ac21aa617c32754 2024-11-12T19:35:47,121 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/d0f0fe47cd544b36b21a83bc94a45bf1 to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/C/d0f0fe47cd544b36b21a83bc94a45bf1 2024-11-12T19:35:47,123 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/recovered.edits/383.seqid to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e/recovered.edits/383.seqid 2024-11-12T19:35:47,124 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/default/TestAcidGuarantees/9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,124 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-12T19:35:47,124 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-12T19:35:47,125 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-12T19:35:47,129 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112012e6da16c1b48579aad003e7699c674_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112012e6da16c1b48579aad003e7699c674_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,130 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111206c245cda26e4ea99073934d7bd14fd9_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111206c245cda26e4ea99073934d7bd14fd9_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,132 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111212afcfcf12764d659258f7ab6088543e_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111212afcfcf12764d659258f7ab6088543e_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,133 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411123449fd1add18455b9dbb0388720e7e36_9487c3b0150a5aa38a9544f87bbf2a6e to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411123449fd1add18455b9dbb0388720e7e36_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,135 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111235db6c6e7e8d4bba9151a22a96c3066c_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111235db6c6e7e8d4bba9151a22a96c3066c_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,136 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111247a187e2d6fd47f0bd4ab043b3c39f36_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111247a187e2d6fd47f0bd4ab043b3c39f36_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,137 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111251b33b09aa8c4565852c3db84e3bf6be_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111251b33b09aa8c4565852c3db84e3bf6be_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,138 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411125fb44fa1ca6d4bc884df3040753f49b7_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411125fb44fa1ca6d4bc884df3040753f49b7_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,140 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112614a57468fe7468f9b4570882c558971_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112614a57468fe7468f9b4570882c558971_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,141 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111276d572e39cf14347b18b405ffc13ad3f_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111276d572e39cf14347b18b405ffc13ad3f_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,142 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111282c64ccc643a4298bfbe79cf9ca134c1_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111282c64ccc643a4298bfbe79cf9ca134c1_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,143 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111285fe67894f224302a34b6c10d6420d1c_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111285fe67894f224302a34b6c10d6420d1c_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,144 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112a2d66723aa9447a88c2dca65493ce772_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112a2d66723aa9447a88c2dca65493ce772_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,145 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112ac35f7f567b54132a5a29d8915a930dd_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112ac35f7f567b54132a5a29d8915a930dd_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,146 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112cc739871163846d8b2049e4788847392_9487c3b0150a5aa38a9544f87bbf2a6e to 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112cc739871163846d8b2049e4788847392_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,147 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112d0b6d372533647c6b85aaf6f559d8027_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112d0b6d372533647c6b85aaf6f559d8027_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,147 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112d3d1aaad67d9466aabd2af421a5a5b52_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112d3d1aaad67d9466aabd2af421a5a5b52_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,148 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112df510198addc4081a350028328d57a44_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112df510198addc4081a350028328d57a44_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,149 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112ee94b16cefc544d28c2119e562250e58_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112ee94b16cefc544d28c2119e562250e58_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,150 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112fabd77123b5e4720ba675c2fa44edc97_9487c3b0150a5aa38a9544f87bbf2a6e to hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241112fabd77123b5e4720ba675c2fa44edc97_9487c3b0150a5aa38a9544f87bbf2a6e 2024-11-12T19:35:47,150 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-12T19:35:47,152 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=171, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:35:47,153 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-12T19:35:47,155 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-12T19:35:47,155 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=171, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:35:47,155 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-12T19:35:47,156 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731440147155"}]},"ts":"9223372036854775807"} 2024-11-12T19:35:47,157 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-12T19:35:47,157 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 9487c3b0150a5aa38a9544f87bbf2a6e, NAME => 'TestAcidGuarantees,,1731440117714.9487c3b0150a5aa38a9544f87bbf2a6e.', STARTKEY => '', ENDKEY => ''}] 2024-11-12T19:35:47,157 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-12T19:35:47,157 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731440147157"}]},"ts":"9223372036854775807"} 2024-11-12T19:35:47,158 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-12T19:35:47,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-12T19:35:47,195 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=171, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-12T19:35:47,197 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 108 msec 2024-11-12T19:35:47,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46265 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-12T19:35:47,393 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-12T19:35:47,406 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=239 (was 237) - Thread LEAK? -, OpenFileDescriptor=459 (was 449) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=760 (was 1013), ProcessCount=11 (was 11), AvailableMemoryMB=550 (was 873) 2024-11-12T19:35:47,406 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-12T19:35:47,406 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-12T19:35:47,406 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x033bfacb to 127.0.0.1:60358 2024-11-12T19:35:47,406 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:47,406 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-12T19:35:47,406 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1934667321, stopped=false 2024-11-12T19:35:47,407 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=81d69e608036,46265,1731439955074 2024-11-12T19:35:47,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T19:35:47,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T19:35:47,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T19:35:47,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T19:35:47,412 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-12T19:35:47,412 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:47,413 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '81d69e608036,33067,1731439956493' ***** 2024-11-12T19:35:47,413 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-12T19:35:47,413 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T19:35:47,413 INFO [RS:0;81d69e608036:33067 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T19:35:47,413 INFO [RS:0;81d69e608036:33067 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T19:35:47,413 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-12T19:35:47,413 INFO [RS:0;81d69e608036:33067 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
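The DISABLE and DELETE completions logged just above, followed by "Shutting down minicluster", are driven from the test side through the Admin API and HBaseTestingUtility. A minimal sketch of that client-side sequence, assuming the test holds an HBaseTestingUtility (called TEST_UTIL here; the name and helper method are illustrative):

  import org.apache.hadoop.hbase.HBaseTestingUtility;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;

  public class TeardownSketch {

    // Disable and delete the test table, then stop the minicluster; each Admin call
    // blocks until the corresponding master procedure (pid=167 disable, pid=171 delete
    // in the entries above) reports completion.
    public static void tearDown(HBaseTestingUtility TEST_UTIL) throws Exception {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
        if (admin.tableExists(table)) {
          admin.disableTable(table);  // DisableTableProcedure
          admin.deleteTable(table);   // DeleteTableProcedure, triggers the HFile archiving above
        }
      }
      TEST_UTIL.shutdownMiniCluster(); // produces the region/WAL close and DataNode shutdown below
    }
  }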
2024-11-12T19:35:47,414 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T19:35:47,414 INFO [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(3579): Received CLOSE for 4793b237becb5eefe1e5fde3a3e5b617 2024-11-12T19:35:47,414 INFO [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(1224): stopping server 81d69e608036,33067,1731439956493 2024-11-12T19:35:47,414 DEBUG [RS:0;81d69e608036:33067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:47,414 INFO [RS:0;81d69e608036:33067 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T19:35:47,414 INFO [RS:0;81d69e608036:33067 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T19:35:47,414 INFO [RS:0;81d69e608036:33067 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T19:35:47,414 INFO [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-12T19:35:47,415 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 4793b237becb5eefe1e5fde3a3e5b617, disabling compactions & flushes 2024-11-12T19:35:47,415 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617. 2024-11-12T19:35:47,415 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617. 2024-11-12T19:35:47,415 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617. after waiting 0 ms 2024-11-12T19:35:47,415 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617. 
2024-11-12T19:35:47,415 INFO [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-12T19:35:47,415 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 4793b237becb5eefe1e5fde3a3e5b617 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-12T19:35:47,415 DEBUG [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 4793b237becb5eefe1e5fde3a3e5b617=hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617.} 2024-11-12T19:35:47,415 DEBUG [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-12T19:35:47,415 INFO [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-12T19:35:47,415 DEBUG [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-12T19:35:47,415 DEBUG [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T19:35:47,415 DEBUG [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T19:35:47,415 INFO [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-12T19:35:47,418 DEBUG [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 4793b237becb5eefe1e5fde3a3e5b617 2024-11-12T19:35:47,433 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/namespace/4793b237becb5eefe1e5fde3a3e5b617/.tmp/info/d745015760dd45b3bae03475a8741967 is 45, key is default/info:d/1731439963785/Put/seqid=0 2024-11-12T19:35:47,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742467_1643 (size=5037) 2024-11-12T19:35:47,437 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/namespace/4793b237becb5eefe1e5fde3a3e5b617/.tmp/info/d745015760dd45b3bae03475a8741967 2024-11-12T19:35:47,440 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/namespace/4793b237becb5eefe1e5fde3a3e5b617/.tmp/info/d745015760dd45b3bae03475a8741967 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/namespace/4793b237becb5eefe1e5fde3a3e5b617/info/d745015760dd45b3bae03475a8741967 2024-11-12T19:35:47,442 DEBUG [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740/.tmp/info/f7d17d9afbac4a8cb6102de136da2688 is 143, key is hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617./info:regioninfo/1731439963618/Put/seqid=0 2024-11-12T19:35:47,443 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/namespace/4793b237becb5eefe1e5fde3a3e5b617/info/d745015760dd45b3bae03475a8741967, entries=2, sequenceid=6, filesize=4.9 K 2024-11-12T19:35:47,444 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 4793b237becb5eefe1e5fde3a3e5b617 in 29ms, sequenceid=6, compaction requested=false 2024-11-12T19:35:47,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742468_1644 (size=7725) 2024-11-12T19:35:47,446 INFO [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740/.tmp/info/f7d17d9afbac4a8cb6102de136da2688 2024-11-12T19:35:47,447 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/namespace/4793b237becb5eefe1e5fde3a3e5b617/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-12T19:35:47,448 INFO [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617. 2024-11-12T19:35:47,448 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 4793b237becb5eefe1e5fde3a3e5b617: 2024-11-12T19:35:47,448 DEBUG [RS_CLOSE_REGION-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1731439962366.4793b237becb5eefe1e5fde3a3e5b617. 
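The flush recorded above runs automatically while the hbase:namespace region closes: the memstore is written under the region's .tmp directory, the file is committed into the info family directory, and a recovered.edits seqid marker is written before the region is declared closed. Outside of shutdown, the same memstore flush can be requested explicitly; a minimal sketch, reusing the hypothetical TEST_UTIL from the earlier sketch:

  import org.apache.hadoop.hbase.HBaseTestingUtility;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;

  public class ExplicitFlushSketch {

    // Ask for a flush of hbase:namespace (the table whose region
    // 4793b237becb5eefe1e5fde3a3e5b617 is being flushed above) without closing it.
    public static void flushNamespace(HBaseTestingUtility TEST_UTIL) throws Exception {
      try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
        admin.flush(TableName.NAMESPACE_TABLE_NAME);
      }
    }
  }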
2024-11-12T19:35:47,464 DEBUG [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740/.tmp/rep_barrier/d15aaa102024432588a3835205e40d2d is 102, key is TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a./rep_barrier:/1731439992801/DeleteFamily/seqid=0 2024-11-12T19:35:47,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742469_1645 (size=6025) 2024-11-12T19:35:47,476 INFO [regionserver/81d69e608036:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T19:35:47,618 DEBUG [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-12T19:35:47,819 DEBUG [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-12T19:35:47,867 INFO [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740/.tmp/rep_barrier/d15aaa102024432588a3835205e40d2d 2024-11-12T19:35:47,884 DEBUG [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740/.tmp/table/fb2b7ee2b63d4cc6bd5b87a5653930c5 is 96, key is TestAcidGuarantees,,1731439964331.76d7848c1ddd620b84cb604cad3a693a./table:/1731439992801/DeleteFamily/seqid=0 2024-11-12T19:35:47,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742470_1646 (size=5942) 2024-11-12T19:35:47,998 INFO [regionserver/81d69e608036:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-12T19:35:47,998 INFO [regionserver/81d69e608036:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-12T19:35:48,019 DEBUG [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-12T19:35:48,219 DEBUG [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-12T19:35:48,288 INFO [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740/.tmp/table/fb2b7ee2b63d4cc6bd5b87a5653930c5 2024-11-12T19:35:48,291 DEBUG [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740/.tmp/info/f7d17d9afbac4a8cb6102de136da2688 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740/info/f7d17d9afbac4a8cb6102de136da2688 2024-11-12T19:35:48,294 INFO [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740/info/f7d17d9afbac4a8cb6102de136da2688, entries=22, sequenceid=93, filesize=7.5 K 2024-11-12T19:35:48,295 DEBUG [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740/.tmp/rep_barrier/d15aaa102024432588a3835205e40d2d as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740/rep_barrier/d15aaa102024432588a3835205e40d2d 2024-11-12T19:35:48,298 INFO [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740/rep_barrier/d15aaa102024432588a3835205e40d2d, entries=6, sequenceid=93, filesize=5.9 K 2024-11-12T19:35:48,298 DEBUG [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740/.tmp/table/fb2b7ee2b63d4cc6bd5b87a5653930c5 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740/table/fb2b7ee2b63d4cc6bd5b87a5653930c5 2024-11-12T19:35:48,301 INFO [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740/table/fb2b7ee2b63d4cc6bd5b87a5653930c5, entries=9, sequenceid=93, filesize=5.8 K 2024-11-12T19:35:48,302 INFO [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 887ms, sequenceid=93, compaction requested=false 2024-11-12T19:35:48,305 DEBUG [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-12T19:35:48,305 DEBUG [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T19:35:48,305 INFO [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-12T19:35:48,305 DEBUG [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-12T19:35:48,306 DEBUG [RS_CLOSE_META-regionserver/81d69e608036:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-12T19:35:48,419 INFO [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(1250): stopping server 81d69e608036,33067,1731439956493; all regions closed. 
2024-11-12T19:35:48,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741834_1010 (size=26050) 2024-11-12T19:35:48,431 DEBUG [RS:0;81d69e608036:33067 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/oldWALs 2024-11-12T19:35:48,431 INFO [RS:0;81d69e608036:33067 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 81d69e608036%2C33067%2C1731439956493.meta:.meta(num 1731439961694) 2024-11-12T19:35:48,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741833_1009 (size=14800216) 2024-11-12T19:35:48,436 DEBUG [RS:0;81d69e608036:33067 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/oldWALs 2024-11-12T19:35:48,436 INFO [RS:0;81d69e608036:33067 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 81d69e608036%2C33067%2C1731439956493:(num 1731439961189) 2024-11-12T19:35:48,436 DEBUG [RS:0;81d69e608036:33067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:48,436 INFO [RS:0;81d69e608036:33067 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T19:35:48,437 INFO [RS:0;81d69e608036:33067 {}] hbase.ChoreService(370): Chore service for: regionserver/81d69e608036:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-11-12T19:35:48,437 INFO [regionserver/81d69e608036:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-12T19:35:48,438 INFO [RS:0;81d69e608036:33067 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.3:33067 2024-11-12T19:35:48,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/81d69e608036,33067,1731439956493 2024-11-12T19:35:48,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T19:35:48,487 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [81d69e608036,33067,1731439956493] 2024-11-12T19:35:48,487 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 81d69e608036,33067,1731439956493; numProcessing=1 2024-11-12T19:35:48,495 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/81d69e608036,33067,1731439956493 already deleted, retry=false 2024-11-12T19:35:48,495 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 81d69e608036,33067,1731439956493 expired; onlineServers=0 2024-11-12T19:35:48,496 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '81d69e608036,46265,1731439955074' ***** 2024-11-12T19:35:48,496 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-12T19:35:48,496 DEBUG [M:0;81d69e608036:46265 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13dd28a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=81d69e608036/172.17.0.3:0 2024-11-12T19:35:48,497 INFO [M:0;81d69e608036:46265 {}] regionserver.HRegionServer(1224): stopping server 81d69e608036,46265,1731439955074 2024-11-12T19:35:48,497 INFO [M:0;81d69e608036:46265 {}] regionserver.HRegionServer(1250): stopping server 81d69e608036,46265,1731439955074; all regions closed. 2024-11-12T19:35:48,497 DEBUG [M:0;81d69e608036:46265 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T19:35:48,497 DEBUG [M:0;81d69e608036:46265 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-12T19:35:48,498 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-12T19:35:48,498 DEBUG [M:0;81d69e608036:46265 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-12T19:35:48,498 DEBUG [master/81d69e608036:0:becomeActiveMaster-HFileCleaner.small.0-1731439960537 {}] cleaner.HFileCleaner(306): Exit Thread[master/81d69e608036:0:becomeActiveMaster-HFileCleaner.small.0-1731439960537,5,FailOnTimeoutGroup] 2024-11-12T19:35:48,498 DEBUG [master/81d69e608036:0:becomeActiveMaster-HFileCleaner.large.0-1731439960526 {}] cleaner.HFileCleaner(306): Exit Thread[master/81d69e608036:0:becomeActiveMaster-HFileCleaner.large.0-1731439960526,5,FailOnTimeoutGroup] 2024-11-12T19:35:48,499 INFO [M:0;81d69e608036:46265 {}] hbase.ChoreService(370): Chore service for: master/81d69e608036:0 had [] on shutdown 2024-11-12T19:35:48,499 DEBUG [M:0;81d69e608036:46265 {}] master.HMaster(1733): Stopping service threads 2024-11-12T19:35:48,499 INFO [M:0;81d69e608036:46265 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-12T19:35:48,500 ERROR [M:0;81d69e608036:46265 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[LeaseRenewer:jenkins@localhost:41367,5,PEWorkerGroup] Thread[IPC Client (150195751) connection to localhost/127.0.0.1:41367 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:41367,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-12T19:35:48,501 INFO [M:0;81d69e608036:46265 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-12T19:35:48,501 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-12T19:35:48,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-12T19:35:48,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T19:35:48,503 DEBUG [M:0;81d69e608036:46265 {}] zookeeper.ZKUtil(347): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-12T19:35:48,503 WARN [M:0;81d69e608036:46265 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-12T19:35:48,504 INFO [M:0;81d69e608036:46265 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-11-12T19:35:48,504 INFO [M:0;81d69e608036:46265 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-12T19:35:48,504 DEBUG [M:0;81d69e608036:46265 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T19:35:48,504 INFO [M:0;81d69e608036:46265 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T19:35:48,504 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T19:35:48,504 DEBUG [M:0;81d69e608036:46265 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T19:35:48,504 DEBUG [M:0;81d69e608036:46265 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T19:35:48,504 DEBUG [M:0;81d69e608036:46265 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T19:35:48,504 INFO [M:0;81d69e608036:46265 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=779.46 KB heapSize=959.91 KB 2024-11-12T19:35:48,523 DEBUG [M:0;81d69e608036:46265 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/16eb08618ed948668cea55737a7ce4de is 82, key is hbase:meta,,1/info:regioninfo/1731439962055/Put/seqid=0 2024-11-12T19:35:48,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742471_1647 (size=5672) 2024-11-12T19:35:48,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T19:35:48,587 INFO [RS:0;81d69e608036:33067 {}] regionserver.HRegionServer(1307): Exiting; stopping=81d69e608036,33067,1731439956493; zookeeper connection closed. 
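The ZooKeeper watcher events in this stretch record the cluster's membership znodes being torn down (/hbase/running and /hbase/master are deleted as the master stops). The same znodes can be inspected with a plain ZooKeeper client pointed at the quorum address from this log; a minimal sketch (the no-op watcher and timeout are illustrative):

  import org.apache.zookeeper.ZooKeeper;

  public class ZnodeCheckSketch {

    public static void main(String[] args) throws Exception {
      // Quorum address taken from the ZKWatcher entries above.
      ZooKeeper zk = new ZooKeeper("127.0.0.1:60358", 30_000, event -> { });
      try {
        // Both znodes are deleted during the shutdown logged here, so these
        // checks would report false once the master has stopped.
        System.out.println("/hbase/running present: " + (zk.exists("/hbase/running", false) != null));
        System.out.println("/hbase/master  present: " + (zk.exists("/hbase/master", false) != null));
      } finally {
        zk.close();
      }
    }
  }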
2024-11-12T19:35:48,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33067-0x10131d2ab780001, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T19:35:48,588 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@350aac24 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@350aac24 2024-11-12T19:35:48,588 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-12T19:35:48,926 INFO [M:0;81d69e608036:46265 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2231 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/16eb08618ed948668cea55737a7ce4de 2024-11-12T19:35:48,960 DEBUG [M:0;81d69e608036:46265 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dd4d3e371e814de7a7523fd8e88a7f01 is 2279, key is \x00\x00\x00\x00\x00\x00\x00\x90/proc:d/1731440120810/Put/seqid=0 2024-11-12T19:35:48,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742472_1648 (size=43686) 2024-11-12T19:35:48,968 INFO [M:0;81d69e608036:46265 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=778.90 KB at sequenceid=2231 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dd4d3e371e814de7a7523fd8e88a7f01 2024-11-12T19:35:48,971 INFO [M:0;81d69e608036:46265 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for dd4d3e371e814de7a7523fd8e88a7f01 2024-11-12T19:35:48,994 DEBUG [M:0;81d69e608036:46265 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/530de0c234af4950ad718fa83283da18 is 69, key is 81d69e608036,33067,1731439956493/rs:state/1731439960655/Put/seqid=0 2024-11-12T19:35:48,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073742473_1649 (size=5156) 2024-11-12T19:35:49,398 INFO [M:0;81d69e608036:46265 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2231 (bloomFilter=true), to=hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/530de0c234af4950ad718fa83283da18 2024-11-12T19:35:49,401 DEBUG [M:0;81d69e608036:46265 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/16eb08618ed948668cea55737a7ce4de as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/16eb08618ed948668cea55737a7ce4de 2024-11-12T19:35:49,405 INFO [M:0;81d69e608036:46265 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/16eb08618ed948668cea55737a7ce4de, entries=8, sequenceid=2231, filesize=5.5 K 2024-11-12T19:35:49,406 DEBUG [M:0;81d69e608036:46265 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dd4d3e371e814de7a7523fd8e88a7f01 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/dd4d3e371e814de7a7523fd8e88a7f01 2024-11-12T19:35:49,409 INFO [M:0;81d69e608036:46265 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for dd4d3e371e814de7a7523fd8e88a7f01 2024-11-12T19:35:49,409 INFO [M:0;81d69e608036:46265 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/dd4d3e371e814de7a7523fd8e88a7f01, entries=171, sequenceid=2231, filesize=42.7 K 2024-11-12T19:35:49,409 DEBUG [M:0;81d69e608036:46265 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/530de0c234af4950ad718fa83283da18 as hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/530de0c234af4950ad718fa83283da18 2024-11-12T19:35:49,412 INFO [M:0;81d69e608036:46265 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41367/user/jenkins/test-data/0925acb0-7506-db0d-5626-19176fec32e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/530de0c234af4950ad718fa83283da18, entries=1, sequenceid=2231, filesize=5.0 K 2024-11-12T19:35:49,413 INFO [M:0;81d69e608036:46265 {}] regionserver.HRegion(3040): Finished flush of dataSize ~779.46 KB/798167, heapSize ~959.61 KB/982640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 909ms, sequenceid=2231, compaction requested=false 2024-11-12T19:35:49,418 INFO [M:0;81d69e608036:46265 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T19:35:49,418 DEBUG [M:0;81d69e608036:46265 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-12T19:35:49,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43501 is added to blk_1073741830_1006 (size=944389) 2024-11-12T19:35:49,421 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-12T19:35:49,421 INFO [M:0;81d69e608036:46265 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 
2024-11-12T19:35:49,421 INFO [M:0;81d69e608036:46265 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.3:46265 2024-11-12T19:35:49,428 DEBUG [M:0;81d69e608036:46265 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/81d69e608036,46265,1731439955074 already deleted, retry=false 2024-11-12T19:35:49,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T19:35:49,537 INFO [M:0;81d69e608036:46265 {}] regionserver.HRegionServer(1307): Exiting; stopping=81d69e608036,46265,1731439955074; zookeeper connection closed. 2024-11-12T19:35:49,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46265-0x10131d2ab780000, quorum=127.0.0.1:60358, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T19:35:49,545 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76026208{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T19:35:49,548 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7489a976{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T19:35:49,548 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T19:35:49,548 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57582772{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T19:35:49,548 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@200c8689{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/hadoop.log.dir/,STOPPED} 2024-11-12T19:35:49,553 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T19:35:49,553 WARN [BP-1795121182-172.17.0.3-1731439949678 heartbeating to localhost/127.0.0.1:41367 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T19:35:49,553 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T19:35:49,553 WARN [BP-1795121182-172.17.0.3-1731439949678 heartbeating to localhost/127.0.0.1:41367 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1795121182-172.17.0.3-1731439949678 (Datanode Uuid 52363b5f-b0a7-403b-89d2-318900fbb25e) service to localhost/127.0.0.1:41367 2024-11-12T19:35:49,557 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/cluster_4e8a161d-4924-73d8-2773-d81d718de17a/dfs/data/data1/current/BP-1795121182-172.17.0.3-1731439949678 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T19:35:49,557 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/cluster_4e8a161d-4924-73d8-2773-d81d718de17a/dfs/data/data2/current/BP-1795121182-172.17.0.3-1731439949678 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T19:35:49,558 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T19:35:49,577 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f0d4558{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T19:35:49,578 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a299586{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T19:35:49,578 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T19:35:49,578 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@588be694{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T19:35:49,578 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73882ca4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/26edda71-3aa2-0779-3e91-8100b1ba0d89/hadoop.log.dir/,STOPPED} 2024-11-12T19:35:49,606 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-11-12T19:35:49,780 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down